From 267792eda714ca4a22acce7a74f25175f2d04891 Mon Sep 17 00:00:00 2001
From: David Gubler <david.gubler@vshn.net>
Date: Fri, 8 Sep 2023 15:03:53 +0200
Subject: [PATCH] Add partOf functionality

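A compose service labeled `k8ify.partOf: $name` is no longer converted
into a workload of its own. Instead it is attached to the compose
service `$name` and rendered as an additional container of that
service's Deployment or StatefulSet. Each container keeps its own env
Secret, the K8s Service carries the ports of all parts, and all exposed
hosts are merged into a single Ingress per workload (previously one
Ingress was created per exposed port; the Ingress and its TLS secret
are now named after the workload). A precheck enforces that a part and
its parent agree on `k8ify.singleton`.

Abridged from the new `parts` golden test:

    services:
      mongo:
        image: mongo:4.0
        labels:
          k8ify.singleton: true
      mongo-metrics-sidecar:
        image: metrics-sidecar:latest
        labels:
          k8ify.singleton: true
          k8ify.partOf: mongo
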
---
 .gitignore                                    |   1 +
 README.md                                     |   1 +
 go.mod                                        |   2 +-
 internal/prechecks.go                         |  62 ++-
 main.go                                       |   4 +-
 pkg/converter/converter.go                    | 390 +++++++++---------
 pkg/ir/ir.go                                  |  53 ++-
 pkg/util/configutils.go                       |   5 +
 .../manifests/portal-oasp-8001-ingress.yaml   |  28 --
 ...-ingress.yaml => portal-oasp-ingress.yaml} |  17 +-
 tests/golden/parts.yaml                       |   7 +
 tests/golden/parts/.k8ify.defaults.yaml       |   3 +
 tests/golden/parts/docker-compose-prod.yml    |  58 +++
 tests/golden/parts/docker-compose.yml         |  26 ++
 .../parts/manifests/mongo-env-secret.yaml     |   7 +
 .../mongo-metrics-sidecar-env-secret.yaml     |   7 +
 .../golden/parts/manifests/mongo-service.yaml |  19 +
 .../parts/manifests/mongo-statefulset.yaml    | 102 +++++
 .../nginx-frontend-oasp-deployment.yaml       |  99 +++++
 .../nginx-frontend-oasp-env-secret.yaml       |  10 +
 .../nginx-frontend-oasp-ingress.yaml          |  41 ++
 .../nginx-frontend-oasp-service.yaml          |  21 +
 .../php-backend-oasp-env-secret.yaml          |  10 +
 .../sessions-oasp-persistentvolumeclaim.yaml  |  15 +
 .../webdata-oasp-persistentvolumeclaim.yaml   |  15 +
 25 files changed, 756 insertions(+), 247 deletions(-)
 delete mode 100644 tests/golden/demo/manifests/portal-oasp-8001-ingress.yaml
 rename tests/golden/demo/manifests/{portal-oasp-9001-ingress.yaml => portal-oasp-ingress.yaml} (60%)
 create mode 100644 tests/golden/parts.yaml
 create mode 100644 tests/golden/parts/.k8ify.defaults.yaml
 create mode 100644 tests/golden/parts/docker-compose-prod.yml
 create mode 100644 tests/golden/parts/docker-compose.yml
 create mode 100644 tests/golden/parts/manifests/mongo-env-secret.yaml
 create mode 100644 tests/golden/parts/manifests/mongo-metrics-sidecar-env-secret.yaml
 create mode 100644 tests/golden/parts/manifests/mongo-service.yaml
 create mode 100644 tests/golden/parts/manifests/mongo-statefulset.yaml
 create mode 100644 tests/golden/parts/manifests/nginx-frontend-oasp-deployment.yaml
 create mode 100644 tests/golden/parts/manifests/nginx-frontend-oasp-env-secret.yaml
 create mode 100644 tests/golden/parts/manifests/nginx-frontend-oasp-ingress.yaml
 create mode 100644 tests/golden/parts/manifests/nginx-frontend-oasp-service.yaml
 create mode 100644 tests/golden/parts/manifests/php-backend-oasp-env-secret.yaml
 create mode 100644 tests/golden/parts/manifests/sessions-oasp-persistentvolumeclaim.yaml
 create mode 100644 tests/golden/parts/manifests/webdata-oasp-persistentvolumeclaim.yaml

diff --git a/.gitignore b/.gitignore
index 719be6c..6c11768 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
 /docker-compose-*
 /env
 /k8ify
+/.idea
 
 # Goreleaser
 /dist/
diff --git a/README.md b/README.md
index 1e1d0fb..67e6af9 100644
--- a/README.md
+++ b/README.md
@@ -103,6 +103,7 @@ Service Labels
 | `k8ify.converter: $script`  | Call `$script` to convert this service into a K8s object, expecting YAML on `$script`'s stdout. Used for plugging additional functionality into k8ify. The first argument sent to `$script` is the name of the resource, after that all the parameters follow (next row) |
 | `k8ify.converter.$key: $value`  | Call `$script` with parameter `--$key $value` |
 | `k8ify.serviceAccountName: $name`  | Set this service's pod(s) spec.serviceAccountName to `$name`, which tells the pod(s) to use ServiceAccount `$name` for accessing the K8s API. This does not set up the ServiceAccount itself. |
+| `k8ify.partOf: $name`  | Combine this compose service with the compose service `$name`, resulting in a single Deployment or StatefulSet with multiple containers. Useful e.g. for sidecars or for closely coupled services like nginx & php-fpm. |
 
 Volume Labels
 
diff --git a/go.mod b/go.mod
index 2f5fc18..feed09e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/vshn/k8ify
 
-go 1.18
+go 1.21
 
 require (
 	github.com/compose-spec/compose-go v1.18.4
diff --git a/internal/prechecks.go b/internal/prechecks.go
index da43d90..80e31ae 100644
--- a/internal/prechecks.go
+++ b/internal/prechecks.go
@@ -1,31 +1,47 @@
 package internal
 
 import (
+	"github.com/vshn/k8ify/pkg/util"
 	"os"
 
-	composeTypes "github.com/compose-spec/compose-go/types"
 	"github.com/sirupsen/logrus"
 	"github.com/vshn/k8ify/pkg/ir"
 )
 
 const HLINE = "--------------------------------------------------------------------------------"
 
-func ComposeServicePrecheck(composeService composeTypes.ServiceConfig) {
-	if composeService.Deploy == nil || composeService.Deploy.Resources.Reservations == nil {
-		logrus.Error(HLINE)
-		logrus.Errorf("  Service '%s' does not have any CPU/memory reservations defined.", composeService.Name)
-		logrus.Error("  k8ify can generate K8s manifests regardless, but your service will be")
-		logrus.Error("  unreliable or not work at all: It may not start at all, be slow to react")
-		logrus.Error("  due to insufficient CPU time or get OOM killed due to insufficient memory.")
-		logrus.Error("  Please specify CPU and memory reservations like this:")
-		logrus.Error("    services:")
-		logrus.Errorf("      %s:", composeService.Name)
-		logrus.Error("        deploy:")
-		logrus.Error("          resources:")
-		logrus.Error("            reservations:    # Minimum guaranteed by K8s to be always available")
-		logrus.Error(`              cpus: "0.2"    # Number of CPU cores. Quotes are required!`)
-		logrus.Error("              memory: 256M")
-		logrus.Error(HLINE)
+func ComposeServicePrecheck(inputs *ir.Inputs) {
+	for _, service := range inputs.Services {
+		composeService := service.AsCompose()
+		if composeService.Deploy == nil || composeService.Deploy.Resources.Reservations == nil {
+			logrus.Error(HLINE)
+			logrus.Errorf("  Service '%s' does not have any CPU/memory reservations defined.", composeService.Name)
+			logrus.Error("  k8ify can generate K8s manifests regardless, but your service will be")
+			logrus.Error("  unreliable or not work at all: It may not start at all, be slow to react")
+			logrus.Error("  due to insufficient CPU time or get OOM killed due to insufficient memory.")
+			logrus.Error("  Please specify CPU and memory reservations like this:")
+			logrus.Error("    services:")
+			logrus.Errorf("      %s:", composeService.Name)
+			logrus.Error("        deploy:")
+			logrus.Error("          resources:")
+			logrus.Error("            reservations:    # Minimum guaranteed by K8s to be always available")
+			logrus.Error(`              cpus: "0.2"    # Number of CPU cores. Quotes are required!`)
+			logrus.Error("              memory: 256M")
+			logrus.Error(HLINE)
+		}
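+		// A part runs in the same pod as its parent, so both must agree on k8ify.singleton.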
+		parentSingleton := util.IsSingleton(composeService.Labels)
+		for _, part := range service.GetParts() {
+			partSingleton := util.IsSingleton(part.AsCompose().Labels)
+			if partSingleton && !parentSingleton {
+				logrus.Errorf("Singleton compose service %s can't be part of non-singleton compose service %s", part.Name, service.Name)
+				os.Exit(1)
+			}
+			if !partSingleton && parentSingleton {
+				logrus.Errorf("Non-singleton compose service %s can't be part of singleton compose service %s", part.Name, service.Name)
+				os.Exit(1)
+			}
+		}
 	}
 }
 
@@ -34,7 +49,19 @@ func VolumesPrecheck(inputs *ir.Inputs) {
 	references := make(map[string][]string)
 
 	for _, service := range inputs.Services {
+
+		// The checks below must cover not only the parent's volumes but also all volumes of its parts
+		allVolumes := make(map[string]bool) // set semantics (eliminate duplicates)
 		for _, volumeName := range service.VolumeNames() {
+			allVolumes[volumeName] = true
+		}
+		for _, part := range service.GetParts() {
+			for _, volumeName := range part.VolumeNames() {
+				allVolumes[volumeName] = true
+			}
+		}
+
+		for volumeName := range allVolumes {
 			volume, ok := inputs.Volumes[volumeName]
 
 			// CHECK: Volume does not exist
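
For illustration, a compose file the new precheck rejects (hypothetical
example; the mirror case, a singleton part under a non-singleton
parent, fails with the corresponding message):

    services:
      mongo:
        labels:
          k8ify.singleton: true   # parent is a singleton
      exporter:
        labels:
          k8ify.partOf: mongo     # part is not a singleton ->
                                  # k8ify logs an error and exits
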
diff --git a/main.go b/main.go
index 23a68d0..47e49a8 100644
--- a/main.go
+++ b/main.go
@@ -95,13 +95,13 @@ func Main(args []string) int {
 	}
 
 	inputs := ir.FromCompose(project)
+	internal.ComposeServicePrecheck(inputs)
 	internal.VolumesPrecheck(inputs)
 
 	objects := converter.Objects{}
 
 	for _, service := range inputs.Services {
-		internal.ComposeServicePrecheck(service.AsCompose())
-		objects = objects.Append(converter.ComposeServiceToK8s(config.Ref, &service, inputs.Volumes))
+		objects = objects.Append(converter.ComposeServiceToK8s(config.Ref, service, inputs.Volumes))
 	}
 
 	converter.PatchIngresses(objects.Ingresses, config.IngressPatch)
diff --git a/pkg/converter/converter.go b/pkg/converter/converter.go
index 71a48f0..f31bf8f 100644
--- a/pkg/converter/converter.go
+++ b/pkg/converter/converter.go
@@ -3,8 +3,10 @@ package converter
 import (
 	"fmt"
 	"log"
+	"maps"
 	"os"
 	"os/exec"
+	"sort"
 	"strconv"
 	"strings"
 
@@ -26,11 +28,11 @@ import (
 func composeServiceVolumesToK8s(
 	refSlug string,
 	mounts []composeTypes.ServiceVolumeConfig,
-	projectVolumes map[string]ir.Volume,
-) ([]core.Volume, []core.VolumeMount) {
+	projectVolumes map[string]*ir.Volume,
+) (map[string]core.Volume, []core.VolumeMount) {
 
 	volumeMounts := []core.VolumeMount{}
-	volumes := []core.Volume{}
+	volumes := make(map[string]core.Volume)
 
 	for _, mount := range mounts {
 		if mount.Type != "volume" {
@@ -45,30 +47,31 @@ func composeServiceVolumesToK8s(
 
 		volume := projectVolumes[mount.Source]
 		if volume.IsShared() {
-			volumes = append(volumes, core.Volume{
+			volumes[name] = core.Volume{
 				Name: name,
 				VolumeSource: core.VolumeSource{
 					PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
 						ClaimName: mount.Source + refSlug,
 					},
 				},
-			})
+			}
 		}
 	}
 	return volumes, volumeMounts
 }
 
-func composeServicePortsToK8s(composeServicePorts []composeTypes.ServicePortConfig) ([]core.ContainerPort, []core.ServicePort) {
-	containerPorts := []core.ContainerPort{}
+func composeServicePortsToK8sServicePorts(workload *ir.Service) []core.ServicePort {
 	servicePorts := []core.ServicePort{}
-	for _, port := range composeServicePorts {
+	ports := workload.AsCompose().Ports
+	// the single k8s service contains the ports of all parts
+	for _, part := range workload.GetParts() {
+		ports = append(ports, part.AsCompose().Ports...)
+	}
+	for _, port := range ports {
 		publishedPort, err := strconv.Atoi(port.Published)
 		if err != nil {
 			publishedPort = int(port.Target)
 		}
-		containerPorts = append(containerPorts, core.ContainerPort{
-			ContainerPort: int32(port.Target),
-		})
 		servicePorts = append(servicePorts, core.ServicePort{
 			Name: fmt.Sprint(publishedPort),
 			Port: int32(publishedPort),
@@ -77,10 +80,20 @@ func composeServicePortsToK8s(composeServicePorts []composeTypes.ServicePortConf
 			},
 		})
 	}
-	return containerPorts, servicePorts
+	return servicePorts
 }
 
-func composeServiceToSecret(refSlug string, composeService composeTypes.ServiceConfig, labels map[string]string) core.Secret {
+func composeServicePortsToK8sContainerPorts(workload *ir.Service) []core.ContainerPort {
+	containerPorts := []core.ContainerPort{}
+	for _, port := range workload.AsCompose().Ports {
+		containerPorts = append(containerPorts, core.ContainerPort{
+			ContainerPort: int32(port.Target),
+		})
+	}
+	return containerPorts
+}
+
+func composeServiceToSecret(composeService composeTypes.ServiceConfig, refSlug string, labels map[string]string) core.Secret {
 	stringData := make(map[string]string)
 	for key, value := range composeService.Environment {
 		stringData[key] = *value
@@ -95,50 +108,36 @@ func composeServiceToSecret(refSlug string, composeService composeTypes.ServiceC
 }
 
 func composeServiceToDeployment(
+	workload *ir.Service,
 	refSlug string,
-	composeService composeTypes.ServiceConfig,
-	containerPorts []core.ContainerPort,
-	volumes []core.Volume,
-	volumeMounts []core.VolumeMount,
-	secretName string,
+	projectVolumes map[string]*ir.Volume,
 	labels map[string]string,
-) apps.Deployment {
+) (apps.Deployment, []core.Secret) {
 
 	deployment := apps.Deployment{}
 	deployment.APIVersion = "apps/v1"
 	deployment.Kind = "Deployment"
-	deployment.Name = composeService.Name + refSlug
+	deployment.Name = workload.AsCompose().Name + refSlug
 	deployment.Labels = labels
-	livenessProbe, readinessProbe, startupProbe := composeServiceToProbes(composeService)
-	resources := composeServiceToResourceRequirements(composeService)
 
-	templateSpec := composeServiceToPodTemplate(
-		deployment.Name,
-		composeService.Image,
-		secretName,
-		containerPorts,
-		livenessProbe,
-		readinessProbe,
-		startupProbe,
-		volumes,
-		volumeMounts,
+	templateSpec, secrets := composeServiceToPodTemplate(
+		workload,
+		refSlug,
+		projectVolumes,
 		labels,
-		resources,
-		composeService.Entrypoint,
-		composeService.Command,
-		util.ServiceAccountName(composeService.Labels),
+		util.ServiceAccountName(workload.AsCompose().Labels),
 	)
 
 	deployment.Spec = apps.DeploymentSpec{
-		Replicas: composeServiceToReplicas(composeService),
-		Strategy: composeServiceToStrategy(composeService),
+		Replicas: composeServiceToReplicas(workload.AsCompose()),
+		Strategy: composeServiceToStrategy(workload.AsCompose()),
 		Template: templateSpec,
 		Selector: &metav1.LabelSelector{
 			MatchLabels: labels,
 		},
 	}
 
-	return deployment
+	return deployment, secrets
 }
 
 func composeServiceToStrategy(composeService composeTypes.ServiceConfig) apps.DeploymentStrategy {
@@ -165,43 +164,29 @@ func getUpdateOrder(composeService composeTypes.ServiceConfig) string {
 }
 
 func composeServiceToStatefulSet(
+	workload *ir.Service,
 	refSlug string,
-	composeService composeTypes.ServiceConfig,
-	containerPorts []core.ContainerPort,
-	volumes []core.Volume,
-	volumeMounts []core.VolumeMount,
+	projectVolumes map[string]*ir.Volume,
 	volumeClaims []core.PersistentVolumeClaim,
-	secretName string,
 	labels map[string]string,
-) apps.StatefulSet {
+) (apps.StatefulSet, []core.Secret) {
 
 	statefulset := apps.StatefulSet{}
 	statefulset.APIVersion = "apps/v1"
 	statefulset.Kind = "StatefulSet"
-	statefulset.Name = composeService.Name + refSlug
+	statefulset.Name = workload.AsCompose().Name + refSlug
 	statefulset.Labels = labels
-	livenessProbe, readinessProbe, startupProbe := composeServiceToProbes(composeService)
-	resources := composeServiceToResourceRequirements(composeService)
 
-	templateSpec := composeServiceToPodTemplate(
-		statefulset.Name,
-		composeService.Image,
-		secretName,
-		containerPorts,
-		livenessProbe,
-		readinessProbe,
-		startupProbe,
-		volumes,
-		volumeMounts,
+	templateSpec, secrets := composeServiceToPodTemplate(
+		workload,
+		refSlug,
+		projectVolumes,
 		labels,
-		resources,
-		composeService.Entrypoint,
-		composeService.Command,
-		util.ServiceAccountName(composeService.Labels),
+		util.ServiceAccountName(workload.AsCompose().Labels),
 	)
 
 	statefulset.Spec = apps.StatefulSetSpec{
-		Replicas: composeServiceToReplicas(composeService),
+		Replicas: composeServiceToReplicas(workload.AsCompose()),
 		Template: templateSpec,
 		Selector: &metav1.LabelSelector{
 			MatchLabels: labels,
@@ -209,7 +194,7 @@ func composeServiceToStatefulSet(
 		VolumeClaimTemplates: volumeClaims,
 	}
 
-	return statefulset
+	return statefulset, secrets
 }
 
 func composeServiceToReplicas(composeService composeTypes.ServiceConfig) *int32 {
@@ -223,26 +208,67 @@ func composeServiceToReplicas(composeService composeTypes.ServiceConfig) *int32
 }
 
 func composeServiceToPodTemplate(
-	name string,
-	image string,
-	secretName string,
-	ports []core.ContainerPort,
-	livenessProbe *core.Probe,
-	readinessProbe *core.Probe,
-	startupProbe *core.Probe,
-	volumes []core.Volume,
-	volumeMounts []core.VolumeMount,
+	workload *ir.Service,
+	refSlug string,
+	projectVolumes map[string]*ir.Volume,
 	labels map[string]string,
-	resources core.ResourceRequirements,
-	entrypoint []string,
-	command []string,
 	serviceAccountName string,
-) core.PodTemplateSpec {
+) (core.PodTemplateSpec, []core.Secret) {
+	container, secret, volumes := composeServiceToContainer(workload, refSlug, projectVolumes, labels)
+	containers := []core.Container{container}
+	secrets := []core.Secret{secret}
+
+	for _, part := range workload.GetParts() {
+		c, s, cvs := composeServiceToContainer(part, refSlug, projectVolumes, labels)
+		containers = append(containers, c)
+		secrets = append(secrets, s)
+		maps.Copy(volumes, cvs)
+	}
+
+	// make sure the array is sorted to have deterministic output
+	keys := make([]string, 0, len(volumes))
+	for key := range volumes {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	volumesArray := []core.Volume{}
+	for _, key := range keys {
+		volumesArray = append(volumesArray, volumes[key])
+	}
 
-	container := core.Container{
-		Name:  name,
-		Image: image,
-		Ports: ports,
+	podSpec := core.PodSpec{
+		Containers:         containers,
+		RestartPolicy:      core.RestartPolicyAlways,
+		Volumes:            volumesArray,
+		ServiceAccountName: serviceAccountName,
+	}
+
+	return core.PodTemplateSpec{
+		Spec: podSpec,
+		ObjectMeta: metav1.ObjectMeta{
+			Labels: labels,
+		},
+	}, secrets
+}
+
+func composeServiceToContainer(
+	workload *ir.Service,
+	refSlug string,
+	projectVolumes map[string]*ir.Volume,
+	labels map[string]string,
+) (core.Container, core.Secret, map[string]core.Volume) {
+	composeService := workload.AsCompose()
+	volumes, volumeMounts := composeServiceVolumesToK8s(
+		refSlug, composeService.Volumes, projectVolumes,
+	)
+	livenessProbe, readinessProbe, startupProbe := composeServiceToProbes(workload)
+	containerPorts := composeServicePortsToK8sContainerPorts(workload)
+	resources := composeServiceToResourceRequirements(composeService)
+	secret := composeServiceToSecret(composeService, refSlug, labels)
+	return core.Container{
+		Name:  composeService.Name + refSlug,
+		Image: composeService.Image,
+		Ports: containerPorts,
 		// We COULD put the environment variables here, but because some of them likely contain sensitive data they are stored in a secret instead
 		// Env:          envVars,
 		// Reference the secret:
@@ -250,7 +276,7 @@ func composeServiceToPodTemplate(
 			{
 				SecretRef: &core.SecretEnvSource{
 					LocalObjectReference: core.LocalObjectReference{
-						Name: secretName,
+						Name: secret.Name,
 					},
 				},
 			},
@@ -260,24 +286,10 @@ func composeServiceToPodTemplate(
 		ReadinessProbe:  readinessProbe,
 		StartupProbe:    startupProbe,
 		Resources:       resources,
-		Command:         entrypoint, // ENTRYPOINT in Docker == 'entrypoint' in Compose == 'command' in K8s
-		Args:            command,    // CMD in Docker == 'command' in Compose == 'args' in K8s
+		Command:         composeService.Entrypoint, // ENTRYPOINT in Docker == 'entrypoint' in Compose == 'command' in K8s
+		Args:            composeService.Command,    // CMD in Docker == 'command' in Compose == 'args' in K8s
 		ImagePullPolicy: core.PullAlways,
-	}
-
-	podSpec := core.PodSpec{
-		Containers:         []core.Container{container},
-		RestartPolicy:      core.RestartPolicyAlways,
-		Volumes:            volumes,
-		ServiceAccountName: serviceAccountName,
-	}
-
-	return core.PodTemplateSpec{
-		Spec: podSpec,
-		ObjectMeta: metav1.ObjectMeta{
-			Labels: labels,
-		},
-	}
+	}, secret, volumes
 }
 
 func composeServiceToService(refSlug string, composeService composeTypes.ServiceConfig, servicePorts []core.ServicePort, labels map[string]string) core.Service {
@@ -294,72 +306,82 @@ func composeServiceToService(refSlug string, composeService composeTypes.Service
 	return service
 }
 
-func composeServiceToIngress(refSlug string, composeService composeTypes.ServiceConfig, service core.Service, labels map[string]string) []networking.Ingress {
-	ingresses := []networking.Ingress{}
-	for i, port := range service.Spec.Ports {
-		// we expect the config to be in "k8ify.expose.PORT"
-		configPrefix := fmt.Sprintf("k8ify.expose.%d", port.Port)
-		ingressConfig := util.SubConfig(composeService.Labels, configPrefix, "host")
-		if _, ok := ingressConfig["host"]; !ok && i == 0 {
-			// for the first port we also accept config in "k8ify.expose"
-			ingressConfig = util.SubConfig(composeService.Labels, "k8ify.expose", "host")
-		}
-
-		if host, ok := ingressConfig["host"]; ok {
-			ingress := networking.Ingress{}
-			ingress.APIVersion = "networking.k8s.io/v1"
-			ingress.Kind = "Ingress"
-			ingress.Name = fmt.Sprintf("%s%s-%d", composeService.Name, refSlug, service.Spec.Ports[i].Port)
-			ingress.Labels = labels
-
-			serviceBackendPort := networking.ServiceBackendPort{
-				Number: service.Spec.Ports[i].Port,
-			}
-
-			ingressServiceBackend := networking.IngressServiceBackend{
-				Name: composeService.Name + refSlug,
-				Port: serviceBackendPort,
-			}
-
-			ingressBackend := networking.IngressBackend{
-				Service: &ingressServiceBackend,
-			}
-
-			pathType := networking.PathTypePrefix
-			path := networking.HTTPIngressPath{
-				PathType: &pathType,
-				Path:     "/",
-				Backend:  ingressBackend,
-			}
+func composeServiceToIngress(workload *ir.Service, refSlug string, service core.Service, labels map[string]string) *networking.Ingress {
+	composeServices := []composeTypes.ServiceConfig{workload.AsCompose()}
+	for _, w := range workload.GetParts() {
+		composeServices = append(composeServices, w.AsCompose())
+	}
 
-			httpIngressRuleValue := networking.HTTPIngressRuleValue{
-				Paths: []networking.HTTPIngressPath{path},
-			}
+	var ingressRules []networking.IngressRule
+	var ingressTLSs []networking.IngressTLS
 
-			ingressRuleValue := networking.IngressRuleValue{
-				HTTP: &httpIngressRuleValue,
+	for _, composeService := range composeServices {
+		for i, port := range service.Spec.Ports {
+			// we expect the config to be in "k8ify.expose.PORT"
+			configPrefix := fmt.Sprintf("k8ify.expose.%d", port.Port)
+			ingressConfig := util.SubConfig(composeService.Labels, configPrefix, "host")
+			if _, ok := ingressConfig["host"]; !ok && i == 0 {
+				// for the first port we also accept config in "k8ify.expose"
+				ingressConfig = util.SubConfig(composeService.Labels, "k8ify.expose", "host")
 			}
 
-			ingressRule := networking.IngressRule{
-				Host:             host,
-				IngressRuleValue: ingressRuleValue,
-			}
-
-			ingressTls := networking.IngressTLS{
-				Hosts:      []string{host},
-				SecretName: fmt.Sprintf("%s-tls", ingress.Name),
+			if host, ok := ingressConfig["host"]; ok {
+				serviceBackendPort := networking.ServiceBackendPort{
+					Number: service.Spec.Ports[i].Port,
+				}
+
+				ingressServiceBackend := networking.IngressServiceBackend{
+					Name: service.Name,
+					Port: serviceBackendPort,
+				}
+
+				ingressBackend := networking.IngressBackend{
+					Service: &ingressServiceBackend,
+				}
+
+				pathType := networking.PathTypePrefix
+				path := networking.HTTPIngressPath{
+					PathType: &pathType,
+					Path:     "/",
+					Backend:  ingressBackend,
+				}
+
+				httpIngressRuleValue := networking.HTTPIngressRuleValue{
+					Paths: []networking.HTTPIngressPath{path},
+				}
+
+				ingressRuleValue := networking.IngressRuleValue{
+					HTTP: &httpIngressRuleValue,
+				}
+
+				ingressRules = append(ingressRules, networking.IngressRule{
+					Host:             host,
+					IngressRuleValue: ingressRuleValue,
+				})
+
+				ingressTLSs = append(ingressTLSs, networking.IngressTLS{
+					Hosts:      []string{host},
+					SecretName: workload.Name + refSlug,
+				})
 			}
+		}
+	}
 
-			ingressSpec := networking.IngressSpec{
-				Rules: []networking.IngressRule{ingressRule},
-				TLS:   []networking.IngressTLS{ingressTls},
-			}
+	if len(ingressRules) == 0 {
+		return nil
+	}
 
-			ingress.Spec = ingressSpec
-			ingresses = append(ingresses, ingress)
-		}
+	ingress := networking.Ingress{}
+	ingress.APIVersion = "networking.k8s.io/v1"
+	ingress.Kind = "Ingress"
+	ingress.Name = workload.Name + refSlug
+	ingress.Labels = labels
+	ingress.Spec = networking.IngressSpec{
+		Rules: ingressRules,
+		TLS:   ingressTLSs,
 	}
-	return ingresses
+
+	return &ingress
 }
 
 func composeServiceToProbe(config map[string]string, port intstr.IntOrString) *core.Probe {
@@ -410,7 +432,8 @@ func composeServiceToProbe(config map[string]string, port intstr.IntOrString) *c
 	}
 }
 
-func composeServiceToProbes(composeService composeTypes.ServiceConfig) (*core.Probe, *core.Probe, *core.Probe) {
+func composeServiceToProbes(workload *ir.Service) (*core.Probe, *core.Probe, *core.Probe) {
+	composeService := workload.AsCompose()
 	if len(composeService.Ports) == 0 {
 		return nil, nil, nil
 	}
@@ -519,7 +542,7 @@ func CallExternalConverter(resourceName string, options map[string]string) (unst
 	return otherResource, nil
 }
 
-func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[string]ir.Volume) Objects {
+func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[string]*ir.Volume) Objects {
 	refSlug := toRefSlug(util.SanitizeWithMinLength(ref, 4), workload)
 	labels := make(map[string]string)
 	labels["k8ify.service"] = workload.Name
@@ -548,26 +571,24 @@ func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[st
 
 	composeService := workload.AsCompose()
 
-	secret := composeServiceToSecret(refSlug, composeService, labels)
-	objects.Secrets = []core.Secret{secret}
-
-	containerPorts, servicePorts := composeServicePortsToK8s(composeService.Ports)
+	servicePorts := composeServicePortsToK8sServicePorts(workload)
 	service := composeServiceToService(refSlug, composeService, servicePorts, labels)
 	objects.Services = []core.Service{service}
 
-	volumes, volumeMounts := composeServiceVolumesToK8s(
-		refSlug, composeService.Volumes, projectVolumes,
-	)
-
-	// Find volumes used by this service
+	// Find volumes used by this service and all its parts
 	rwoVolumes, rwxVolumes := workload.Volumes(projectVolumes)
+	for _, part := range workload.GetParts() {
+		rwoV, rwxV := part.Volumes(projectVolumes)
+		maps.Copy(rwoVolumes, rwoV)
+		maps.Copy(rwxVolumes, rwxV)
+	}
 
 	// All shared (rwx) volumes used by the service, no matter if the service is a StatefulSet or a Deployment, must be
 	// turned into PersistentVolumeClaims. Note that since these volumes are shared, the same PersistentVolumeClaim might
 	// be generated by multiple compose services. Objects.Append() takes care of deduplication.
 	pvcs := []core.PersistentVolumeClaim{}
 	for _, vol := range rwxVolumes {
-		pvcs = append(pvcs, ComposeSharedVolumeToK8s(ref, &vol))
+		pvcs = append(pvcs, ComposeSharedVolumeToK8s(ref, vol))
 	}
 	objects.PersistentVolumeClaims = pvcs
 
@@ -577,34 +598,35 @@ func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[st
 		// ensuring that each volume remains rwo
 		pvcs := []core.PersistentVolumeClaim{}
 		for _, vol := range rwoVolumes {
-			pvcs = append(pvcs, composeVolumeToPvc(vol.Name, labels, &vol))
+			pvcs = append(pvcs, composeVolumeToPvc(vol.Name, labels, vol))
 		}
 
-		statefulset := composeServiceToStatefulSet(
+		statefulset, secrets := composeServiceToStatefulSet(
+			workload,
 			refSlug,
-			composeService,
-			containerPorts,
-			volumes,
-			volumeMounts,
+			projectVolumes,
 			pvcs,
-			secret.Name,
 			labels,
 		)
 		objects.StatefulSets = []apps.StatefulSet{statefulset}
+		objects.Secrets = secrets
 	} else {
-		deployment := composeServiceToDeployment(refSlug,
-			composeService,
-			containerPorts,
-			volumes,
-			volumeMounts,
-			secret.Name,
+		deployment, secrets := composeServiceToDeployment(
+			workload,
+			refSlug,
+			projectVolumes,
 			labels,
 		)
 		objects.Deployments = []apps.Deployment{deployment}
+		objects.Secrets = secrets
 	}
 
-	ingresses := composeServiceToIngress(refSlug, composeService, service, labels)
-	objects.Ingresses = ingresses
+	ingress := composeServiceToIngress(workload, refSlug, service, labels)
+	if ingress == nil {
+		objects.Ingresses = []networking.Ingress{}
+	} else {
+		objects.Ingresses = []networking.Ingress{*ingress}
+	}
 
 	return objects
 }
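
The net effect on the generated manifests, abridged from the `parts`
golden test below: the parent's Service carries the ports of all parts,
and a single Ingress, named after the workload with no port suffix,
merges every exposed host.

    kind: Service
    metadata:
      name: nginx-frontend-oasp
    spec:
      ports:
      - name: "80"       # from nginx-frontend
        port: 80
      - name: "4480"     # from the php-backend part
        port: 4480
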
diff --git a/pkg/ir/ir.go b/pkg/ir/ir.go
index fb49e0c..25db5c3 100644
--- a/pkg/ir/ir.go
+++ b/pkg/ir/ir.go
@@ -7,28 +7,45 @@ import (
 )
 
 type Inputs struct {
-	Services map[string]Service
-	Volumes  map[string]Volume
+	Services map[string]*Service
+	Volumes  map[string]*Volume
 }
 
 func NewInputs() *Inputs {
 	return &Inputs{
-		Services: make(map[string]Service),
-		Volumes:  make(map[string]Volume),
+		Services: make(map[string]*Service),
+		Volumes:  make(map[string]*Volume),
 	}
 }
 
 func FromCompose(project *composeTypes.Project) *Inputs {
 	inputs := NewInputs()
 
+	// first find out all the regular ("parent") services
 	for _, composeService := range project.Services {
+		if util.PartOf(composeService.Labels) != nil {
+			continue
+		}
 		// `project.Services` is a list, so we use the name as reported by the
 		// service
 		inputs.Services[composeService.Name] = NewService(composeService.Name, composeService)
 	}
 
+	// then find all the parts that belong to a parent service and attach them to their parents
+	for _, composeService := range project.Services {
+		partOf := util.PartOf(composeService.Labels)
+		if partOf == nil {
+			continue
+		}
+		parent, ok := inputs.Services[*partOf]
+		if ok {
+			service := NewService(composeService.Name, composeService)
+			parent.AddPart(service)
+		}
+	}
+
 	for name, composeVolume := range project.Volumes {
 		// `project.Volumes` is a map where the key is the volume name, while
 		// `volume.Name` is something else (the name prefixed with `_`???). So
 		// we use the key as the name.
 		inputs.Volumes[name] = NewVolume(name, composeVolume)
@@ -43,10 +60,12 @@ type Service struct {
 	Name string
 
 	raw composeTypes.ServiceConfig
+
+	parts []*Service
 }
 
-func NewService(name string, composeService composeTypes.ServiceConfig) Service {
-	return Service{Name: name, raw: composeService}
+func NewService(name string, composeService composeTypes.ServiceConfig) *Service {
+	return &Service{Name: name, raw: composeService, parts: make([]*Service, 0)}
 }
 
 // AsCompose returns the underlying compose config
@@ -55,6 +74,14 @@ func (s *Service) AsCompose() composeTypes.ServiceConfig {
 	return s.raw
 }
 
+func (s *Service) AddPart(part *Service) {
+	s.parts = append(s.parts, part)
+}
+
+func (s *Service) GetParts() []*Service {
+	return s.parts
+}
+
 // VolumeNames lists the names of all volumes that are mounted by this service
 func (s *Service) VolumeNames() []string {
 	names := []string{}
@@ -71,15 +98,15 @@ func (s *Service) VolumeNames() []string {
 	return names
 }
 
-func (s *Service) Volumes(volumes map[string]Volume) ([]Volume, []Volume) {
-	rwoVolumes := []Volume{}
-	rwxVolumes := []Volume{}
+func (s *Service) Volumes(volumes map[string]*Volume) (map[string]*Volume, map[string]*Volume) {
+	rwoVolumes := make(map[string]*Volume)
+	rwxVolumes := make(map[string]*Volume)
 	for _, volumeName := range s.VolumeNames() {
 		volume := volumes[volumeName]
 		if volume.IsShared() {
-			rwxVolumes = append(rwxVolumes, volume)
+			rwxVolumes[volume.Name] = volume
 		} else {
-			rwoVolumes = append(rwoVolumes, volume)
+			rwoVolumes[volume.Name] = volume
 		}
 	}
 	return rwoVolumes, rwxVolumes
@@ -100,8 +127,8 @@ type Volume struct {
 	raw composeTypes.VolumeConfig
 }
 
-func NewVolume(name string, composeVolume composeTypes.VolumeConfig) Volume {
-	return Volume{
+func NewVolume(name string, composeVolume composeTypes.VolumeConfig) *Volume {
+	return &Volume{
 		Name: name,
 		raw:  composeVolume,
 	}
diff --git a/pkg/util/configutils.go b/pkg/util/configutils.go
index f1f812a..3b5c4af 100644
--- a/pkg/util/configutils.go
+++ b/pkg/util/configutils.go
@@ -99,6 +99,11 @@ func Converter(labels map[string]string) *string {
 	return GetOptional(labels, "k8ify.converter")
 }
 
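+// PartOf returns the value of the "k8ify.partOf" label, if present.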
+func PartOf(labels map[string]string) *string {
+	return GetOptional(labels, "k8ify.partOf")
+}
+
 // StorageSize determines the requested storage size for a volume, or a
 // fallback value.
 func StorageSize(labels map[string]string, fallback string) resource.Quantity {
diff --git a/tests/golden/demo/manifests/portal-oasp-8001-ingress.yaml b/tests/golden/demo/manifests/portal-oasp-8001-ingress.yaml
deleted file mode 100644
index e2d2674..0000000
--- a/tests/golden/demo/manifests/portal-oasp-8001-ingress.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  annotations:
-    cert-manager.io/cluster-issuer: letsencrypt-production
-  creationTimestamp: null
-  labels:
-    k8ify.ref-slug: oasp
-    k8ify.service: portal
-  name: portal-oasp-8001
-spec:
-  rules:
-  - host: portal-k8ify.apps.cloudscale-lpg-2.appuio.cloud
-    http:
-      paths:
-      - backend:
-          service:
-            name: portal-oasp
-            port:
-              number: 8001
-        path: /
-        pathType: Prefix
-  tls:
-  - hosts:
-    - portal-k8ify.apps.cloudscale-lpg-2.appuio.cloud
-    secretName: portal-oasp-8001-tls
-status:
-  loadBalancer: {}
diff --git a/tests/golden/demo/manifests/portal-oasp-9001-ingress.yaml b/tests/golden/demo/manifests/portal-oasp-ingress.yaml
similarity index 60%
rename from tests/golden/demo/manifests/portal-oasp-9001-ingress.yaml
rename to tests/golden/demo/manifests/portal-oasp-ingress.yaml
index e3f87f8..1d99bdd 100644
--- a/tests/golden/demo/manifests/portal-oasp-9001-ingress.yaml
+++ b/tests/golden/demo/manifests/portal-oasp-ingress.yaml
@@ -7,9 +7,19 @@ metadata:
   labels:
     k8ify.ref-slug: oasp
     k8ify.service: portal
-  name: portal-oasp-9001
+  name: portal-oasp
 spec:
   rules:
+  - host: portal-k8ify.apps.cloudscale-lpg-2.appuio.cloud
+    http:
+      paths:
+      - backend:
+          service:
+            name: portal-oasp
+            port:
+              number: 8001
+        path: /
+        pathType: Prefix
   - host: portal-k8ify-admin.apps.cloudscale-lpg-2.appuio.cloud
     http:
       paths:
@@ -21,8 +31,11 @@ spec:
         path: /
         pathType: Prefix
   tls:
+  - hosts:
+    - portal-k8ify.apps.cloudscale-lpg-2.appuio.cloud
+    secretName: portal-oasp
   - hosts:
     - portal-k8ify-admin.apps.cloudscale-lpg-2.appuio.cloud
-    secretName: portal-oasp-9001-tls
+    secretName: portal-oasp
 status:
   loadBalancer: {}
diff --git a/tests/golden/parts.yaml b/tests/golden/parts.yaml
new file mode 100644
index 0000000..121b1f1
--- /dev/null
+++ b/tests/golden/parts.yaml
@@ -0,0 +1,7 @@
+---
+environments:
+  prod:
+    vars:
+      prod_mongodb_password: very secret yeah
+      UID: "42"
+      GID: "42"
diff --git a/tests/golden/parts/.k8ify.defaults.yaml b/tests/golden/parts/.k8ify.defaults.yaml
new file mode 100644
index 0000000..4a057ae
--- /dev/null
+++ b/tests/golden/parts/.k8ify.defaults.yaml
@@ -0,0 +1,3 @@
+ingressPatch:
+  addAnnotations:
+    cert-manager.io/cluster-issuer: letsencrypt-production
diff --git a/tests/golden/parts/docker-compose-prod.yml b/tests/golden/parts/docker-compose-prod.yml
new file mode 100644
index 0000000..017c624
--- /dev/null
+++ b/tests/golden/parts/docker-compose-prod.yml
@@ -0,0 +1,58 @@
+version: '3.4'
+services:
+  nginx-frontend:
+    image: nginx-frontend:prod
+    deploy:
+      replicas: 2
+      resources:
+        reservations:
+          cpus: '1'
+          memory: 2G
+    labels:
+      k8ify.expose.80: 'mywebapp.example.com'
+  php-backend:
+    image: php-backend:prod
+    labels:
+      k8ify.partOf: nginx-frontend
+      k8ify.expose.4480: 'nginx-bypass.mywebapp.example.com'
+    deploy:
+      resources:
+        reservations:
+          cpus: '2'
+          memory: 4G
+
+  mongo:
+    labels:
+      k8ify.singleton: true
+    image: mongo:4.0
+    restart: always
+    deploy:
+      resources:
+        limits:
+          memory: 8G
+        reservations:
+          cpus: '0.5'
+          memory: 4G
+    ports:
+      - '127.0.0.1:27017:27017'
+    volumes:
+      - mongodb_data:/data/db
+  mongo-metrics-sidecar:
+    image: metrics-sidecar:latest
+    deploy:
+      resources:
+        reservations:
+          cpus: '0.1'
+          memory: 256M
+    labels:
+      k8ify.singleton: true
+      k8ify.partOf: mongo
+    ports:
+      - '127.0.0.1:33000:33000'
+    volumes:
+      - mongodb_data:/data/db
+
+volumes:
+  mongodb_data:
+    labels:
+      k8ify.singleton: true
diff --git a/tests/golden/parts/docker-compose.yml b/tests/golden/parts/docker-compose.yml
new file mode 100644
index 0000000..0239ef9
--- /dev/null
+++ b/tests/golden/parts/docker-compose.yml
@@ -0,0 +1,26 @@
+version: '3.4'
+services:
+  nginx-frontend:
+    image: nginx-frontend:dev
+    ports:
+      - "80:80"
+    volumes:
+      - webdata:/data/web
+    environment:
+      - foo=bar
+  php-backend:
+    image: php-backend:dev
+    ports:
+      - "4480:4480"
+    volumes:
+      - webdata:/data/web
+      - sessions:/data/sessions
+    environment:
+      - xyz=baz
+volumes:
+  webdata:
+    labels:
+      k8ify.shared: true
+  sessions:
+    labels:
+      k8ify.shared: true
diff --git a/tests/golden/parts/manifests/mongo-env-secret.yaml b/tests/golden/parts/manifests/mongo-env-secret.yaml
new file mode 100644
index 0000000..1992f06
--- /dev/null
+++ b/tests/golden/parts/manifests/mongo-env-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.service: mongo
+  name: mongo-env
diff --git a/tests/golden/parts/manifests/mongo-metrics-sidecar-env-secret.yaml b/tests/golden/parts/manifests/mongo-metrics-sidecar-env-secret.yaml
new file mode 100644
index 0000000..848d199
--- /dev/null
+++ b/tests/golden/parts/manifests/mongo-metrics-sidecar-env-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.service: mongo
+  name: mongo-metrics-sidecar-env
diff --git a/tests/golden/parts/manifests/mongo-service.yaml b/tests/golden/parts/manifests/mongo-service.yaml
new file mode 100644
index 0000000..aca5707
--- /dev/null
+++ b/tests/golden/parts/manifests/mongo-service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.service: mongo
+  name: mongo
+spec:
+  ports:
+  - name: "27017"
+    port: 27017
+    targetPort: 27017
+  - name: "33000"
+    port: 33000
+    targetPort: 33000
+  selector:
+    k8ify.service: mongo
+status:
+  loadBalancer: {}
diff --git a/tests/golden/parts/manifests/mongo-statefulset.yaml b/tests/golden/parts/manifests/mongo-statefulset.yaml
new file mode 100644
index 0000000..60161d1
--- /dev/null
+++ b/tests/golden/parts/manifests/mongo-statefulset.yaml
@@ -0,0 +1,102 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.service: mongo
+  name: mongo
+spec:
+  selector:
+    matchLabels:
+      k8ify.service: mongo
+  serviceName: ""
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8ify.service: mongo
+    spec:
+      containers:
+      - envFrom:
+        - secretRef:
+            name: mongo-env
+        image: mongo:4.0
+        imagePullPolicy: Always
+        livenessProbe:
+          failureThreshold: 3
+          periodSeconds: 30
+          successThreshold: 1
+          tcpSocket:
+            port: 27017
+          timeoutSeconds: 60
+        name: mongo
+        ports:
+        - containerPort: 27017
+        resources:
+          limits:
+            memory: 8Gi
+          requests:
+            cpu: 500m
+            memory: 4Gi
+        startupProbe:
+          failureThreshold: 30
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 27017
+          timeoutSeconds: 60
+        volumeMounts:
+        - mountPath: /data/db
+          name: mongodb-data
+      - envFrom:
+        - secretRef:
+            name: mongo-metrics-sidecar-env
+        image: metrics-sidecar:latest
+        imagePullPolicy: Always
+        livenessProbe:
+          failureThreshold: 3
+          periodSeconds: 30
+          successThreshold: 1
+          tcpSocket:
+            port: 33000
+          timeoutSeconds: 60
+        name: mongo-metrics-sidecar
+        ports:
+        - containerPort: 33000
+        resources:
+          limits:
+            cpu: "1"
+            memory: 256Mi
+          requests:
+            cpu: 100m
+            memory: 256Mi
+        startupProbe:
+          failureThreshold: 30
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 33000
+          timeoutSeconds: 60
+        volumeMounts:
+        - mountPath: /data/db
+          name: mongodb-data
+      restartPolicy: Always
+  updateStrategy: {}
+  volumeClaimTemplates:
+  - apiVersion: v1
+    kind: PersistentVolumeClaim
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8ify.service: mongo
+      name: mongodb-data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
+    status: {}
+status:
+  availableReplicas: 0
+  replicas: 0
diff --git a/tests/golden/parts/manifests/nginx-frontend-oasp-deployment.yaml b/tests/golden/parts/manifests/nginx-frontend-oasp-deployment.yaml
new file mode 100644
index 0000000..1aa3eb2
--- /dev/null
+++ b/tests/golden/parts/manifests/nginx-frontend-oasp-deployment.yaml
@@ -0,0 +1,99 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx-frontend
+  name: nginx-frontend-oasp
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      k8ify.ref-slug: oasp
+      k8ify.service: nginx-frontend
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8ify.ref-slug: oasp
+        k8ify.service: nginx-frontend
+    spec:
+      containers:
+      - envFrom:
+        - secretRef:
+            name: nginx-frontend-oasp-env
+        image: nginx-frontend:prod
+        imagePullPolicy: Always
+        livenessProbe:
+          failureThreshold: 3
+          periodSeconds: 30
+          successThreshold: 1
+          tcpSocket:
+            port: 80
+          timeoutSeconds: 60
+        name: nginx-frontend-oasp
+        ports:
+        - containerPort: 80
+        resources:
+          limits:
+            cpu: "10"
+            memory: 2Gi
+          requests:
+            cpu: "1"
+            memory: 2Gi
+        startupProbe:
+          failureThreshold: 30
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 80
+          timeoutSeconds: 60
+        volumeMounts:
+        - mountPath: /data/web
+          name: webdata
+      - envFrom:
+        - secretRef:
+            name: php-backend-oasp-env
+        image: php-backend:prod
+        imagePullPolicy: Always
+        livenessProbe:
+          failureThreshold: 3
+          periodSeconds: 30
+          successThreshold: 1
+          tcpSocket:
+            port: 4480
+          timeoutSeconds: 60
+        name: php-backend-oasp
+        ports:
+        - containerPort: 4480
+        resources:
+          limits:
+            cpu: "20"
+            memory: 4Gi
+          requests:
+            cpu: "2"
+            memory: 4Gi
+        startupProbe:
+          failureThreshold: 30
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 4480
+          timeoutSeconds: 60
+        volumeMounts:
+        - mountPath: /data/sessions
+          name: sessions
+        - mountPath: /data/web
+          name: webdata
+      restartPolicy: Always
+      volumes:
+      - name: sessions
+        persistentVolumeClaim:
+          claimName: sessions-oasp
+      - name: webdata
+        persistentVolumeClaim:
+          claimName: webdata-oasp
+status: {}
diff --git a/tests/golden/parts/manifests/nginx-frontend-oasp-env-secret.yaml b/tests/golden/parts/manifests/nginx-frontend-oasp-env-secret.yaml
new file mode 100644
index 0000000..1f638e9
--- /dev/null
+++ b/tests/golden/parts/manifests/nginx-frontend-oasp-env-secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx-frontend
+  name: nginx-frontend-oasp-env
+stringData:
+  foo: bar
diff --git a/tests/golden/parts/manifests/nginx-frontend-oasp-ingress.yaml b/tests/golden/parts/manifests/nginx-frontend-oasp-ingress.yaml
new file mode 100644
index 0000000..a1cbdbf
--- /dev/null
+++ b/tests/golden/parts/manifests/nginx-frontend-oasp-ingress.yaml
@@ -0,0 +1,41 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    cert-manager.io/cluster-issuer: letsencrypt-production
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx-frontend
+  name: nginx-frontend-oasp
+spec:
+  rules:
+  - host: mywebapp.example.com
+    http:
+      paths:
+      - backend:
+          service:
+            name: nginx-frontend-oasp
+            port:
+              number: 80
+        path: /
+        pathType: Prefix
+  - host: nginx-bypass.mywebapp.example.com
+    http:
+      paths:
+      - backend:
+          service:
+            name: nginx-frontend-oasp
+            port:
+              number: 4480
+        path: /
+        pathType: Prefix
+  tls:
+  - hosts:
+    - mywebapp.example.com
+    secretName: nginx-frontend-oasp
+  - hosts:
+    - nginx-bypass.mywebapp.example.com
+    secretName: nginx-frontend-oasp
+status:
+  loadBalancer: {}
diff --git a/tests/golden/parts/manifests/nginx-frontend-oasp-service.yaml b/tests/golden/parts/manifests/nginx-frontend-oasp-service.yaml
new file mode 100644
index 0000000..e942032
--- /dev/null
+++ b/tests/golden/parts/manifests/nginx-frontend-oasp-service.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx-frontend
+  name: nginx-frontend-oasp
+spec:
+  ports:
+  - name: "80"
+    port: 80
+    targetPort: 80
+  - name: "4480"
+    port: 4480
+    targetPort: 4480
+  selector:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx-frontend
+status:
+  loadBalancer: {}
diff --git a/tests/golden/parts/manifests/php-backend-oasp-env-secret.yaml b/tests/golden/parts/manifests/php-backend-oasp-env-secret.yaml
new file mode 100644
index 0000000..8999f57
--- /dev/null
+++ b/tests/golden/parts/manifests/php-backend-oasp-env-secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx-frontend
+  name: php-backend-oasp-env
+stringData:
+  xyz: baz
diff --git a/tests/golden/parts/manifests/sessions-oasp-persistentvolumeclaim.yaml b/tests/golden/parts/manifests/sessions-oasp-persistentvolumeclaim.yaml
new file mode 100644
index 0000000..1b3472b
--- /dev/null
+++ b/tests/golden/parts/manifests/sessions-oasp-persistentvolumeclaim.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.volume: sessions
+  name: sessions-oasp
+spec:
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+status: {}
diff --git a/tests/golden/parts/manifests/webdata-oasp-persistentvolumeclaim.yaml b/tests/golden/parts/manifests/webdata-oasp-persistentvolumeclaim.yaml
new file mode 100644
index 0000000..7f988b8
--- /dev/null
+++ b/tests/golden/parts/manifests/webdata-oasp-persistentvolumeclaim.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.volume: webdata
+  name: webdata-oasp
+spec:
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+status: {}