diff --git a/docs/reference/domains/kubernetes-domain.md b/docs/reference/domains/kubernetes-domain.md index 6d1026cf..24cb84d1 100644 --- a/docs/reference/domains/kubernetes-domain.md +++ b/docs/reference/domains/kubernetes-domain.md @@ -70,6 +70,50 @@ domain: file: '' # Optional - File name where resource(s) to create are stored; Only optional if manifest is not specified. Currently does not support relative paths. ``` +In addition to simply creating and reading individual resources, you can create a resource, wait for it to be ready, then read the possible child resources that should be created. For example, the following `kubernetes-spec` will create a deployment, wait for it to be ready, and then read the pods that should be children of that deployment: + +```yaml +domain: + type: kubernetes + kubernetes-spec: + create-resources: + - name: testDeploy + manifest: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: test-deployment + namespace: validation-test + spec: + replicas: 1 + selector: + matchLabels: + app: test-app + template: + metadata: + labels: + app: test-app + spec: + containers: + - name: test-container + image: nginx + wait: + group: apps + version: v1 + resource: deployments + name: test-deployment + namespace: validation-test + resources: + - name: validationTestPods + resource-rule: + version: v1 + resource: pods + namespaces: [validation-test] +``` + +> [!NOTE] +> The `create-resources` is evaluated prior to the `wait`, and `wait` is evaluated prior to the `resources`. + ## Lists vs Named Resource When Lula retrieves all targeted resources (bounded by namespace when applicable), the payload is a list of resources. When a resource Name is specified - the payload will be a single object. 
diff --git a/src/pkg/domains/kubernetes/cluster.go b/src/pkg/domains/kubernetes/cluster.go index bf4fc03f..f90386d6 100644 --- a/src/pkg/domains/kubernetes/cluster.go +++ b/src/pkg/domains/kubernetes/cluster.go @@ -89,5 +89,5 @@ func (c *Cluster) validateAndGetGVR(group, version, resource string) (*metav1.AP } } - return nil, fmt.Errorf("resource %s not found in group %s version %s", resource, group, version) + return nil, fmt.Errorf("resource %s not found in group, %s, version, %s", resource, group, version) } diff --git a/src/pkg/domains/kubernetes/create.go b/src/pkg/domains/kubernetes/create.go index 53add621..6ca15bf1 100644 --- a/src/pkg/domains/kubernetes/create.go +++ b/src/pkg/domains/kubernetes/create.go @@ -22,14 +22,14 @@ import ( "sigs.k8s.io/e2e-framework/klient/wait/conditions" ) -// CreateE2E() creates the test resources, reads status, and destroys them -func CreateE2E(ctx context.Context, cluster *Cluster, resources []CreateResource) (map[string]interface{}, error) { +// CreateAllResources() creates all resources and returns their status +func CreateAllResources(ctx context.Context, cluster *Cluster, resources []CreateResource) (map[string]interface{}, []string, error) { collections := make(map[string]interface{}, len(resources)) namespaces := make([]string, 0) var errList []string if cluster == nil { - return nil, fmt.Errorf("cluster is nil") + return nil, nil, fmt.Errorf("cluster is nil") } // Create the resources, collect the outcome @@ -43,7 +43,7 @@ func CreateE2E(ctx context.Context, cluster *Cluster, resources []CreateResource message.Debugf("error creating namespace %s: %v", resource.Namespace, err) errList = append(errList, err.Error()) } - // Only add to list if not already in cluster + // Only add to list of resources to clean up if not already in cluster if new { namespaces = append(namespaces, resource.Namespace) } @@ -64,25 +64,17 @@ func CreateE2E(ctx context.Context, cluster *Cluster, resources []CreateResource errList = 
append(errList, err.Error()) } } else { - // return nil, errors.New("resource must have either manifest or file specified") errList = append(errList, "resource must have either manifest or file specified") } collections[resource.Name] = collection } - // Destroy the resources - if err := DestroyAllResources(ctx, cluster.kclient, collections, namespaces); err != nil { - // If a resource can't be destroyed, return the error (include retry logic??) - message.Debugf("error destroying all resources: %v", err) - errList = append(errList, err.Error()) - } - // Check if there were any errors if len(errList) > 0 { - return nil, errors.New("errors encountered: " + strings.Join(errList, "; ")) + return nil, nil, errors.New("errors creating resources encountered: " + strings.Join(errList, "; ")) } - return collections, nil + return collections, namespaces, nil } // CreateResourceFromManifest() creates the resource from the manifest string @@ -152,7 +144,7 @@ func DestroyAllResources(ctx context.Context, client klient.Client, collections // Check if there were any errors if len(errList) > 0 { - return errors.New("errors encountered: " + strings.Join(errList, "; ")) + return errors.New("errors encountered destroying resources: " + strings.Join(errList, "; ")) } return nil @@ -160,16 +152,12 @@ func DestroyAllResources(ctx context.Context, client klient.Client, collections // createResource() creates a resource in a k8s cluster func createResource(ctx context.Context, client klient.Client, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { - // Modify the obj name to avoid collisions - // Omitting this - if you want to check a specific object name, this gets in the way. 
Additionally, probably aren't running in such quick succession that this is necessary - //obj.SetName(envconf.RandomName(obj.GetName(), 16)) - // Create the object -> error returned when object is unable to be created if err := client.Resources().Create(ctx, obj); err != nil { return nil, err } - // Wait for object to exist -> Times out at 10 seconds + // Wait for object to exist -> Times out at 30 seconds conditionFunc := func(obj k8s.Object) bool { if err := client.Resources().Get(ctx, obj.GetName(), obj.GetNamespace(), obj); err != nil { return false @@ -178,12 +166,12 @@ func createResource(ctx context.Context, client klient.Client, obj *unstructured } if err := wait.For( conditions.New(client.Resources()).ResourceMatch(obj, conditionFunc), - wait.WithTimeout(time.Second*10), + wait.WithTimeout(time.Second*30), ); err != nil { return nil, nil // Not returning error, just assuming that the object was blocked or not created } - // Add pause for resources to do thier thang + // Add pause for resources to do their thang -> this should be subsumed by the addition of wait and resources time.Sleep(time.Second * 2) // Not sure if this is enough time, need to test with more complex resources // Get the object to return @@ -201,10 +189,10 @@ func destroyResource(ctx context.Context, client klient.Client, obj *unstructure return err } - // Wait for object to be removed from the cluster -> Times out at 30 seconds + // Wait for object to be removed from the cluster -> Times out at 5 minutes if err := wait.For( conditions.New(client.Resources()).ResourceDeleted(obj), - wait.WithTimeout(time.Second*30), + wait.WithTimeout(time.Minute*5), ); err != nil { return err // Object is unable to be deleted... retry logic? Or just return error? 
} diff --git a/src/pkg/domains/kubernetes/types.go b/src/pkg/domains/kubernetes/spec.go similarity index 82% rename from src/pkg/domains/kubernetes/types.go rename to src/pkg/domains/kubernetes/spec.go index 90351d0f..2e1b55c3 100644 --- a/src/pkg/domains/kubernetes/types.go +++ b/src/pkg/domains/kubernetes/spec.go @@ -88,30 +88,55 @@ func CreateKubernetesDomain(spec *KubernetesSpec) (types.Domain, error) { // GetResources returns the resources from the Kubernetes domain // Evaluates the `create-resources` first, `wait` second, and finally `resources` last -func (k KubernetesDomain) GetResources(ctx context.Context) (resources types.DomainResources, err error) { +func (k KubernetesDomain) GetResources(ctx context.Context) (types.DomainResources, error) { + createdResources := make(types.DomainResources) + resources := make(types.DomainResources) + var namespaces []string + cluster, err := GetCluster() if err != nil { return nil, err } + // Evaluate the create-resources parameter + if k.Spec.CreateResources != nil { + createdResources, namespaces, err = CreateAllResources(ctx, cluster, k.Spec.CreateResources) + if err != nil { + return nil, fmt.Errorf("error in create: %v", err) + } + // Destroy the resources after everything else has been evaluated + defer func() { + if cleanupErr := DestroyAllResources(ctx, cluster.kclient, createdResources, namespaces); cleanupErr != nil { + if err == nil { + err = cleanupErr + } + } + }() + } + // Evaluate the wait condition if k.Spec.Wait != nil { err := EvaluateWait(ctx, cluster, *k.Spec.Wait) if err != nil { - return nil, err + return nil, fmt.Errorf("error in wait: %v", err) } } - // TODO: Return both? 
+ // Evaluate the resources parameter if k.Spec.Resources != nil { resources, err = QueryCluster(ctx, cluster, k.Spec.Resources) if err != nil { - return nil, err + return nil, fmt.Errorf("error in query: %v", err) } - } else if k.Spec.CreateResources != nil { - resources, err = CreateE2E(ctx, cluster, k.Spec.CreateResources) - if err != nil { - return nil, err + } + + // Join the resources and createdResources + // Note - resource keys must be unique + if len(resources) == 0 { + return createdResources, nil + } else { + for k, v := range createdResources { + resources[k] = v } } diff --git a/src/pkg/domains/kubernetes/types_test.go b/src/pkg/domains/kubernetes/spec_test.go similarity index 100% rename from src/pkg/domains/kubernetes/types_test.go rename to src/pkg/domains/kubernetes/spec_test.go diff --git a/src/pkg/domains/kubernetes/wait.go b/src/pkg/domains/kubernetes/wait.go index 987ed30b..8926c469 100644 --- a/src/pkg/domains/kubernetes/wait.go +++ b/src/pkg/domains/kubernetes/wait.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/defenseunicorns/lula/src/pkg/message" pkgkubernetes "github.com/defenseunicorns/pkg/kubernetes" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/cli-utils/pkg/object" @@ -16,7 +17,7 @@ func EvaluateWait(ctx context.Context, cluster *Cluster, waitPayload Wait) error } // TODO: incorporate wait for multiple objects? 
- obj, err := globalCluster.validateAndGetGVR(waitPayload.Group, waitPayload.Version, waitPayload.Resource) + obj, err := cluster.validateAndGetGVR(waitPayload.Group, waitPayload.Version, waitPayload.Resource) if err != nil { return fmt.Errorf("unable to validate GVR: %v", err) } @@ -24,7 +25,7 @@ func EvaluateWait(ctx context.Context, cluster *Cluster, waitPayload Wait) error Name: waitPayload.Name, Namespace: waitPayload.Namespace, GroupKind: schema.GroupKind{ - Group: obj.Group, + Group: waitPayload.Group, Kind: obj.Kind, }, } @@ -42,6 +43,6 @@ func EvaluateWait(ctx context.Context, cluster *Cluster, waitPayload Wait) error } waitCtx, waitCancel := context.WithTimeout(ctx, duration) defer waitCancel() - + message.Debugf("Waiting for %s %s/%s to be ready", waitPayload.Resource, waitPayload.Name, waitPayload.Namespace) return pkgkubernetes.WaitForReady(waitCtx, cluster.watcher, []object.ObjMetadata{objMeta}) } diff --git a/src/test/e2e/create_resource_data_test.go b/src/test/e2e/create_resource_data_test.go index b2bbf0c7..43e7d77d 100644 --- a/src/test/e2e/create_resource_data_test.go +++ b/src/test/e2e/create_resource_data_test.go @@ -6,6 +6,7 @@ import ( "github.com/defenseunicorns/lula/src/cmd/validate" "github.com/defenseunicorns/lula/src/pkg/message" + appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,6 +88,49 @@ func TestCreateResourceDataValidation(t *testing.T) { return ctx }). 
+ Assess("Validate Create Resource With Wait and Read", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { + oscalPath := "./scenarios/create-resources/oscal-component-wait-read.yaml" + message.NoProgress = true + + // TODO: fix this nonsense + validate.ConfirmExecution = true + validate.RunNonInteractively = true + validate.SaveResources = false + + assessment, err := validate.ValidateOnPath(context.Background(), oscalPath, "") + if err != nil { + t.Fatal(err) + } + + if len(assessment.Results) == 0 { + t.Fatal("Expected greater than zero results") + } + + result := assessment.Results[0] + + if result.Findings == nil { + t.Fatal("Expected findings to be not nil") + } + + for _, finding := range *result.Findings { + state := finding.Target.Status.State + if state != "satisfied" { + t.Fatal("State should be satisfied, but got :", state) + } + } + + // Check that resources in the cluster were destroyed + podList := &corev1.PodList{} + err = config.Client().Resources().WithNamespace("validation-test").List(ctx, podList) + if len(podList.Items) != 0 || err != nil { + t.Fatal("pods should not exist in validation-test namespace") + } + if err := config.Client().Resources().Get(ctx, "test-deployment", "validation-test", &appsv1.Deployment{}); err == nil { + t.Fatal("deployment test-deployment should not exist") + } + + return ctx + }). 
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context { // Delete the secure namespace secureNamespace := &corev1.Namespace{ diff --git a/src/test/e2e/scenarios/create-resources/oscal-component-wait-read.yaml b/src/test/e2e/scenarios/create-resources/oscal-component-wait-read.yaml new file mode 100644 index 00000000..e9649006 --- /dev/null +++ b/src/test/e2e/scenarios/create-resources/oscal-component-wait-read.yaml @@ -0,0 +1,116 @@ +# add the descriptions inline +component-definition: + uuid: E6A291A4-2BC8-43A0-B4B2-FD67CAAE1F8F + metadata: + title: OSCAL Demo Tool + last-modified: "2022-09-13T12:00:00Z" + version: "20220913" + oscal-version: 1.1.1 + parties: + # Should be consistent across all of the packages, but where is ground truth? + - uuid: C18F4A9F-A402-415B-8D13-B51739D689FF + type: organization + name: Defense Unicorns + links: + - href: https://github.com/defenseunicorns/lula + rel: website + components: + - uuid: A9D5204C-7E5B-4C43-BD49-34DF759B9F04 + type: software + title: lula + description: | + Defense Unicorns lula + purpose: Validate compliance controls + responsible-roles: + - role-id: provider + party-uuids: + - C18F4A9F-A402-415B-8D13-B51739D689FF # matches parties entry for Defense Unicorns + control-implementations: + - uuid: A584FEDC-8CEA-4B0C-9F07-85C2C4AE751A + source: https://github.com/defenseunicorns/lula # Link to generic security document + description: Validate generic security requirements + implemented-requirements: + - uuid: 42C2FFDC-5F05-44DF-A67F-EEC8660AEFFD + control-id: ID-1 + description: >- + Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, + quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum + dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. + links: + - href: '#e2b65e34-f211-4503-b879-b19419e509a8' + rel: lula + text: Test creation of resources with wait and read + back-matter: + resources: + - uuid: e2b65e34-f211-4503-b879-b19419e509a8 + description: | + metadata: + name: validate-with-create-and-read + uuid: e2b65e34-f211-4503-b879-b19419e509a8 + domain: + type: kubernetes + kubernetes-spec: + create-resources: + - name: testDeploy + manifest: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: test-deployment + namespace: validation-test + spec: + replicas: 1 + selector: + matchLabels: + app: test-app + template: + metadata: + labels: + app: test-app + spec: + containers: + - name: test-container + image: nginx + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 25 + periodSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 10 + periodSeconds: 4 + wait: + group: apps + version: v1 + resource: deployments + name: test-deployment + namespace: validation-test + resources: + - name: validationTestPods + resource-rule: + version: v1 + resource: pods + namespaces: [validation-test] + provider: + type: opa + opa-spec: + rego: | + package validate + import rego.v1 + + # Default values + default validate := false + default msg := "Not evaluated" + + # All containers must be ready + validate if { + every pod in input.validationTestPods { + every containerStatus in pod.status.containerStatuses { + containerStatus.ready + } + } + } \ No newline at end of file diff --git a/src/test/e2e/scenarios/create-resources/oscal-component.yaml b/src/test/e2e/scenarios/create-resources/oscal-component.yaml index 6b8617c0..25b26299 100644 --- a/src/test/e2e/scenarios/create-resources/oscal-component.yaml +++ b/src/test/e2e/scenarios/create-resources/oscal-component.yaml @@ -37,9 +37,6 @@ component-definition: quis nostrud exercitation ullamco laboris 
nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. links: - # - href: 'file://./validation.yaml' - # rel: lula - # text: Test creation of resources remote - href: '#bfca025a-b92e-467c-8350-56cb10eb5575' rel: lula text: Test creation of resources local