diff --git a/README.md b/README.md
index 57228b2..0764465 100644
--- a/README.md
+++ b/README.md
@@ -146,6 +146,14 @@ This replaces the TCP based health check by a more specific HTTP(S) check.
 | `k8ify.readiness.*` | All the sub-values work the same as for `k8ify.liveness` incl. defaults. No values are copied over. However the readiness check is disabled by default. |
 | `k8ify.readiness.enabled: false` | Enable or disable the readiness check. Default is false. |
 
+#### Target Cluster Configuration
+
+In some cases k8ify's output needs to differ depending on the target cluster's configuration. To support this, certain properties of the target cluster can be configured via the `x-targetCfg` root key in the compose file.
+
+| Key | Effect |
+| ---- | ------- |
+| `appsDomain: $domain` | A cluster may provide a wildcard certificate for applications to use. If you configure this option and expose a service on a subdomain of `$domain`, the resulting Ingress uses this wildcard certificate instead of an individually issued one (e.g. via Let's Encrypt). |
+| `maxExposeLength: $length` | k8ify checks the length of exposed domain names because overly long names break the Ingress due to certificate common name length restrictions. Default is 63. |
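+
+For example, exposing a service on a subdomain of the configured apps domain makes the resulting Ingress use the cluster's wildcard certificate (this mirrors `tests/golden/cluster-apps-domain/compose.yml`):
+
+```yaml
+services:
+  nginx:
+    labels:
+      k8ify.expose: foo.apps.cluster.net
+    image: docker.io/library/nginx
+    ports:
+      - '8080:80'
+
+x-targetCfg:
+  appsDomain: "*.apps.cluster.net"
+```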
 
 ## Conversion
 
diff --git a/internal/prechecks.go b/internal/prechecks.go
index f7cfa3e..4933faf 100644
--- a/internal/prechecks.go
+++ b/internal/prechecks.go
@@ -106,3 +106,16 @@ func VolumesPrecheck(inputs *ir.Inputs) {
 		}
 	}
 }
+
+func DomainLengthPrecheck(inputs *ir.Inputs) {
+	maxExposeLength := inputs.TargetCfg.MaxExposeLength()
+	for _, service := range inputs.Services {
+		for _, domain := range util.SubConfig(service.Labels(), "k8ify.expose", "default") {
+			if !inputs.TargetCfg.IsSubdomainOfAppsDomain(domain) && len(domain) > maxExposeLength {
+				logrus.Errorf("Service '%s' is supposed to be exposed on domain '%s' which is longer than %d characters. This likely won't work due to certificate common name length restrictions.", service.Name, domain, maxExposeLength)
+				logrus.Errorf("To fix this you can use the cluster's appsDomain wildcard certificate (compose file option 'x-targetCfg.appsDomain') or adjust this check ('x-targetCfg.maxExposeLength').")
+				os.Exit(1)
+			}
+		}
+	}
+}
diff --git a/main.go b/main.go
index d35bfc1..59659d1 100644
--- a/main.go
+++ b/main.go
@@ -96,11 +96,12 @@ func Main(args []string) int {
 	inputs := ir.FromCompose(project)
 	internal.ComposeServicePrecheck(inputs)
 	internal.VolumesPrecheck(inputs)
+	internal.DomainLengthPrecheck(inputs)
 
 	objects := converter.Objects{}
 
 	for _, service := range inputs.Services {
-		objects = objects.Append(converter.ComposeServiceToK8s(config.Ref, service, inputs.Volumes))
+		objects = objects.Append(converter.ComposeServiceToK8s(config.Ref, service, inputs.Volumes, inputs.TargetCfg))
 	}
 
 	forceRestartAnnotation := make(map[string]string)
diff --git a/pkg/converter/converter.go b/pkg/converter/converter.go
index 17dc378..e66ad97 100644
--- a/pkg/converter/converter.go
+++ b/pkg/converter/converter.go
@@ -361,7 +361,7 @@ func composeServiceToServices(refSlug string, workload *ir.Service, servicePorts
 	return services
 }
 
-func composeServiceToIngress(workload *ir.Service, refSlug string, services []core.Service, labels map[string]string) *networking.Ingress {
+func composeServiceToIngress(workload *ir.Service, refSlug string, services []core.Service, labels map[string]string, targetCfg ir.TargetCfg) *networking.Ingress {
 	var service *core.Service
 	for _, s := range services {
 		if serviceSpecIsUnexposedDefault(s.Spec) {
@@ -422,10 +422,15 @@ func composeServiceToIngress(workload *ir.Service, refSlug string, services []co
 				IngressRuleValue: ingressRuleValue,
 			})
 
-			ingressTLSs = append(ingressTLSs, networking.IngressTLS{
-				Hosts:      []string{host},
-				SecretName: workload.Name + refSlug,
-			})
+			if targetCfg.IsSubdomainOfAppsDomain(host) {
+				// special case: With an empty TLS configuration the ingress uses the cluster-wide apps domain wildcard certificate
+				ingressTLSs = append(ingressTLSs, networking.IngressTLS{})
+			} else {
+				ingressTLSs = append(ingressTLSs, networking.IngressTLS{
+					Hosts:      []string{host},
+					SecretName: workload.Name + refSlug,
+				})
+			}
 		}
 	}
 }
@@ -606,7 +611,7 @@ func CallExternalConverter(resourceName string, options map[string]string) (unst
 	return otherResource, nil
 }
 
-func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[string]*ir.Volume) Objects {
+func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[string]*ir.Volume, targetCfg ir.TargetCfg) Objects {
 	refSlug := toRefSlug(util.SanitizeWithMinLength(ref, 4), workload)
 	labels := make(map[string]string)
 	labels["k8ify.service"] = workload.Name
@@ -690,7 +695,7 @@ func ComposeServiceToK8s(ref string, workload *ir.Service, projectVolumes map[st
 		objects.Secrets = secrets
 	}
 
-	ingress := composeServiceToIngress(workload, refSlug, objects.Services, labels)
+	ingress := composeServiceToIngress(workload, refSlug, objects.Services, labels, targetCfg)
 	if ingress == nil {
 		objects.Ingresses = []networking.Ingress{}
 	} else {
diff --git a/pkg/ir/ir.go b/pkg/ir/ir.go
index b125193..3fc7816 100644
--- a/pkg/ir/ir.go
+++ b/pkg/ir/ir.go
@@ -9,8 +9,9 @@ import (
 )
 
 type Inputs struct {
-	Services map[string]*Service
-	Volumes  map[string]*Volume
+	Services  map[string]*Service
+	Volumes   map[string]*Volume
+	TargetCfg TargetCfg
 }
 
 func NewInputs() *Inputs {
@@ -53,6 +54,12 @@ func FromCompose(project *composeTypes.Project) *Inputs {
 		inputs.Volumes[name] = NewVolume(name, composeVolume)
 	}
 
+	if targetCfg, ok := project.Extensions["x-targetCfg"]; ok {
+		if targetCfgMap, ok := targetCfg.(map[string]interface{}); ok {
+			inputs.TargetCfg = targetCfgMap
+		}
+	}
+
 	return inputs
 }
 
@@ -177,3 +184,44 @@ func (v *Volume) Size(fallback string) resource.Quantity {
 func (v *Volume) SizeIsMissing() bool {
 	return util.StorageSizeRaw(v.raw.Labels) == nil
 }
+
+type TargetCfg map[string]interface{}
+
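+// appsDomain returns the configured apps domain normalized to a leading-dot
+// suffix (e.g. both "*.apps.cluster.net" and "apps.cluster.net" become
+// ".apps.cluster.net"), or nil if no usable apps domain is configured.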
+func (t TargetCfg) appsDomain() *string {
+	if value, ok := t["appsDomain"]; ok {
+		if domain, ok := value.(string); ok {
+			if strings.HasPrefix(domain, "*.") {
+				domain = domain[1:]
+			}
+			if !strings.HasPrefix(domain, ".") {
+				domain = "." + domain
+			}
+			if len(domain) < 2 {
+				return nil
+			}
+			return &domain
+		}
+	}
+	return nil
+}
+
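+// IsSubdomainOfAppsDomain reports whether domain is a single label directly
+// below the configured apps domain, e.g. "foo.apps.cluster.net" for an apps
+// domain of "*.apps.cluster.net".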
+func (t TargetCfg) IsSubdomainOfAppsDomain(domain string) bool {
+	appsDomain := t.appsDomain()
+	if appsDomain == nil || domain == "" {
+		return false
+	}
+	domainComponents := strings.Split(domain, ".")
+	if len(domainComponents) < 2 {
+		return false
+	}
+	return domainComponents[0]+*appsDomain == domain
+}
+
+func (t TargetCfg) MaxExposeLength() int {
+	if value, ok := t["maxExposeLength"]; ok {
+		if length, ok := value.(int); ok {
+			return length
+		}
+	}
+	return 63
+}
diff --git a/tests/golden/cluster-apps-domain.yml b/tests/golden/cluster-apps-domain.yml
new file mode 100644
index 0000000..508ae83
--- /dev/null
+++ b/tests/golden/cluster-apps-domain.yml
@@ -0,0 +1,3 @@
+---
+environments:
+  prod: {}
diff --git a/tests/golden/cluster-apps-domain/compose.yml b/tests/golden/cluster-apps-domain/compose.yml
new file mode 100644
index 0000000..352eb8d
--- /dev/null
+++ b/tests/golden/cluster-apps-domain/compose.yml
@@ -0,0 +1,10 @@
+services:
+  nginx:
+    labels:
+      k8ify.expose: foo.apps.cluster.net
+    image: docker.io/library/nginx
+    ports:
+      - '8080:80'
+
+x-targetCfg:
+  appsDomain: "*.apps.cluster.net"
diff --git a/tests/golden/cluster-apps-domain/manifests/nginx-oasp-deployment.yaml b/tests/golden/cluster-apps-domain/manifests/nginx-oasp-deployment.yaml
new file mode 100644
index 0000000..ac27539
--- /dev/null
+++ b/tests/golden/cluster-apps-domain/manifests/nginx-oasp-deployment.yaml
@@ -0,0 +1,45 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx
+  name: nginx-oasp
+spec:
+  selector:
+    matchLabels:
+      k8ify.ref-slug: oasp
+      k8ify.service: nginx
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        k8ify.ref-slug: oasp
+        k8ify.service: nginx
+    spec:
+      containers:
+      - image: docker.io/library/nginx
+        imagePullPolicy: Always
+        livenessProbe:
+          failureThreshold: 3
+          periodSeconds: 30
+          successThreshold: 1
+          tcpSocket:
+            port: 80
+          timeoutSeconds: 60
+        name: nginx-oasp
+        ports:
+        - containerPort: 80
+        resources: {}
+        startupProbe:
+          failureThreshold: 30
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 80
+          timeoutSeconds: 60
+      restartPolicy: Always
+status: {}
diff --git a/tests/golden/cluster-apps-domain/manifests/nginx-oasp-ingress.yaml b/tests/golden/cluster-apps-domain/manifests/nginx-oasp-ingress.yaml
new file mode 100644
index 0000000..d780d7d
--- /dev/null
+++ b/tests/golden/cluster-apps-domain/manifests/nginx-oasp-ingress.yaml
@@ -0,0 +1,24 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx
+  name: nginx-oasp
+spec:
+  rules:
+  - host: foo.apps.cluster.net
+    http:
+      paths:
+      - backend:
+          service:
+            name: nginx-oasp
+            port:
+              number: 8080
+        path: /
+        pathType: Prefix
+  tls:
+  - {}
+status:
+  loadBalancer: {}
diff --git a/tests/golden/cluster-apps-domain/manifests/nginx-oasp-service.yaml b/tests/golden/cluster-apps-domain/manifests/nginx-oasp-service.yaml
new file mode 100644
index 0000000..aeb65dd
--- /dev/null
+++ b/tests/golden/cluster-apps-domain/manifests/nginx-oasp-service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx
+  name: nginx-oasp
+spec:
+  ports:
+  - name: "8080"
+    port: 8080
+    targetPort: 80
+  selector:
+    k8ify.ref-slug: oasp
+    k8ify.service: nginx
+status:
+  loadBalancer: {}