From f51610aa84a07ee90ef287b0881093447bd7b88a Mon Sep 17 00:00:00 2001
From: Lawrence Gadban
Date: Fri, 17 Jan 2025 12:53:42 -0600
Subject: [PATCH 1/4] delete ingress, knative, accesslogger

---
 Makefile | 88 +-
 docs/content/static/content/osa_provided.md | 1 -
 go.mod | 1 -
 go.sum | 5 -
 install/helm/gloo/generate.go | 5 -
 install/helm/gloo/generate/values.go | 96 ---
 .../gloo/templates/10-ingress-deployment.yaml | 98 ---
 .../11-ingress-proxy-deployment.yaml | 101 ---
 .../templates/12-ingress-proxy-configmap.yaml | 156 -----
 .../templates/13-ingress-proxy-service.yaml | 45 --
 .../14-clusteringress-proxy-deployment.yaml | 94 ---
 .../15-clusteringress-proxy-configmap.yaml | 157 -----
 .../16-clusteringress-proxy-service.yaml | 36 -
 .../21-namespace-clusterrole-ingress.yaml | 35 -
 .../22-namespace-clusterrole-knative.yaml | 44 --
 ...-namespace-clusterrolebinding-ingress.yaml | 28 -
 .../26-knative-external-proxy-deployment.yaml | 97 ---
 .../27-knative-external-proxy-configmap.yaml | 156 -----
 .../28-knative-external-proxy-service.yaml | 47 --
 .../29-knative-internal-proxy-deployment.yaml | 99 ---
 .../30-knative-internal-proxy-configmap.yaml | 158 -----
 .../31-knative-internal-proxy-service.yaml | 38 --
 projects/accesslogger/cmd/Dockerfile | 11 -
 .../accesslogger/cmd/Dockerfile.distroless | 10 -
 projects/accesslogger/cmd/main.go | 11 -
 .../accesslogger/pkg/loggingservice/server.go | 71 --
 projects/accesslogger/pkg/runner/run.go | 269 --------
 projects/accesslogger/pkg/runner/settings.go | 22 -
 projects/clusteringress/README.md | 167 -----
 .../api/external/knative/cluster_ingress.go | 30 -
 .../api/external/knative/solo-kit.json | 13 -
 projects/clusteringress/api/v1/solo-kit.json | 16 -
 .../pkg/api/custom/knative/cache.go | 84 ---
 .../knative/knative_clusteringress_client.go | 165 -----
 .../external/knative/cluster_ingress.sk.go | 159 -----
 .../knative/cluster_ingress_client.sk.go | 130 ----
 .../knative/cluster_ingress_reconciler.sk.go | 47 --
 .../pkg/api/v1/translator_event_loop.sk.go | 153 -----
 .../api/v1/translator_simple_event_loop.sk.go | 134 ----
 .../pkg/api/v1/translator_snapshot.sk.go | 146 ----
 .../api/v1/translator_snapshot_emitter.sk.go | 261 --------
 .../translator_snapshot_simple_emitter.sk.go | 109 ---
 .../pkg/translator/translate.go | 26 -
 .../pkg/translator/translate_test.go | 341 ----------
 .../pkg/translator/translator_suite_test.go | 14 -
 .../pkg/translator/translator_syncer.go | 182 -----
 .../pkg/translator/translator_syncer_test.go | 117 ----
 projects/ingress/api/v1/ingress.proto | 27 -
 projects/ingress/api/v1/service.proto | 28 -
 projects/ingress/api/v1/solo-kit.json | 30 -
 projects/ingress/cmd/Dockerfile | 13 -
 projects/ingress/cmd/Dockerfile.distroless | 11 -
 projects/ingress/cmd/main.go | 12 -
 .../pkg/api/ingress/resource_client.go | 324 ---------
 .../pkg/api/service/resource_client.go | 299 ---------
 .../ingress/pkg/api/service/service_client.go | 23 -
 .../ingress/pkg/api/v1/ingress.pb.clone.go | 59 --
 .../ingress/pkg/api/v1/ingress.pb.equal.go | 80 ---
 projects/ingress/pkg/api/v1/ingress.pb.go | 183 -----
 .../ingress/pkg/api/v1/ingress.pb.hash.go | 86 ---
 .../pkg/api/v1/ingress.pb.uniquehash.go | 87 ---
 projects/ingress/pkg/api/v1/ingress.sk.go | 154 -----
 .../ingress/pkg/api/v1/ingress_client.sk.go | 130 ----
 .../pkg/api/v1/ingress_reconciler.sk.go | 47 --
 .../ingress/pkg/api/v1/kube_service.sk.go | 154 -----
 .../pkg/api/v1/kube_service_client.sk.go | 130 ----
 .../pkg/api/v1/kube_service_reconciler.sk.go | 47 --
 .../ingress/pkg/api/v1/service.pb.clone.go | 59 --
 .../ingress/pkg/api/v1/service.pb.equal.go | 80 ---
 projects/ingress/pkg/api/v1/service.pb.go | 182 -----
 .../ingress/pkg/api/v1/service.pb.hash.go | 106 ---
 .../pkg/api/v1/service.pb.uniquehash.go | 107 ---
 .../pkg/api/v1/status_event_loop.sk.go | 153 -----
 .../pkg/api/v1/status_simple_event_loop.sk.go | 134 ----
 .../ingress/pkg/api/v1/status_snapshot.sk.go | 197 ------
 .../pkg/api/v1/status_snapshot_emitter.sk.go | 330 ---------
 .../v1/status_snapshot_simple_emitter.sk.go | 109 ---
 .../pkg/api/v1/translator_event_loop.sk.go | 153 -----
 .../api/v1/translator_simple_event_loop.sk.go | 134 ----
 .../pkg/api/v1/translator_snapshot.sk.go | 252 -------
 .../api/v1/translator_snapshot_emitter.sk.go | 403 -----------
 .../translator_snapshot_simple_emitter.sk.go | 113 ----
 projects/ingress/pkg/setup/opts.go | 25 -
 projects/ingress/pkg/setup/setup.go | 19 -
 projects/ingress/pkg/setup/setup_syncer.go | 338 ----------
 projects/ingress/pkg/status/status_syncer.go | 146 ----
 projects/ingress/pkg/translator/translate.go | 291 --------
 .../ingress/pkg/translator/translate_test.go | 631 ------------------
 .../pkg/translator/translator_suite_test.go | 14 -
 .../pkg/translator/translator_syncer.go | 108 ---
 projects/knative/README.md | 169 -----
 .../knative/api/external/knative/ingress.go | 45 --
 .../api/external/knative/solo-kit.json | 13 -
 projects/knative/api/v1/solo-kit.json | 16 -
 .../knative/pkg/api/custom/knative/cache.go | 84 ---
 .../knative/knative_clusteringress_client.go | 165 -----
 .../pkg/api/external/knative/ingress.sk.go | 159 -----
 .../api/external/knative/ingress_client.sk.go | 130 ----
 .../external/knative/ingress_reconciler.sk.go | 47 --
 .../pkg/api/v1/translator_event_loop.sk.go | 153 -----
 .../api/v1/translator_simple_event_loop.sk.go | 134 ----
 .../pkg/api/v1/translator_snapshot.sk.go | 146 ----
 .../api/v1/translator_snapshot_emitter.sk.go | 261 --------
 .../translator_snapshot_simple_emitter.sk.go | 109 ---
 projects/knative/pkg/translator/translate.go | 303 ---------
 .../knative/pkg/translator/translate_test.go | 399 -----------
 .../pkg/translator/translator_suite_test.go | 14 -
 .../pkg/translator/translator_syncer.go | 220 ------
 .../pkg/translator/translator_syncer_test.go | 228 -------
 test/e2e/access_log_test.go | 364 ----------
 test/e2e/grpcweb_test.go | 184 -----
 test/kube2e/gloo/resource_client_test.go | 73 --
 112 files changed, 1 insertion(+), 13696 deletions(-)
 delete mode 100644 install/helm/gloo/templates/10-ingress-deployment.yaml
 delete mode 100644 install/helm/gloo/templates/11-ingress-proxy-deployment.yaml
 delete mode 100644 install/helm/gloo/templates/12-ingress-proxy-configmap.yaml
 delete mode 100644 install/helm/gloo/templates/13-ingress-proxy-service.yaml
 delete mode 100644 install/helm/gloo/templates/14-clusteringress-proxy-deployment.yaml
 delete mode 100644 install/helm/gloo/templates/15-clusteringress-proxy-configmap.yaml
 delete mode 100644 install/helm/gloo/templates/16-clusteringress-proxy-service.yaml
 delete mode 100644 install/helm/gloo/templates/21-namespace-clusterrole-ingress.yaml
 delete mode 100644 install/helm/gloo/templates/22-namespace-clusterrole-knative.yaml
 delete mode 100644 install/helm/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
 delete mode 100644 install/helm/gloo/templates/26-knative-external-proxy-deployment.yaml
 delete mode 100644 install/helm/gloo/templates/27-knative-external-proxy-configmap.yaml
 delete mode 100644 install/helm/gloo/templates/28-knative-external-proxy-service.yaml
 delete mode 100644 install/helm/gloo/templates/29-knative-internal-proxy-deployment.yaml
 delete mode 100644 install/helm/gloo/templates/30-knative-internal-proxy-configmap.yaml
 delete mode 100644 install/helm/gloo/templates/31-knative-internal-proxy-service.yaml
 delete mode 100644 projects/accesslogger/cmd/Dockerfile
 delete mode 100644 projects/accesslogger/cmd/Dockerfile.distroless
 delete mode 100644 projects/accesslogger/cmd/main.go
 delete mode 100644 projects/accesslogger/pkg/loggingservice/server.go
 delete mode 100644 projects/accesslogger/pkg/runner/run.go
 delete mode 100644 projects/accesslogger/pkg/runner/settings.go
 delete mode 100644 projects/clusteringress/README.md
 delete mode 100644 projects/clusteringress/api/external/knative/cluster_ingress.go
 delete mode 100644 projects/clusteringress/api/external/knative/solo-kit.json
 delete mode 100644 projects/clusteringress/api/v1/solo-kit.json
 delete mode 100644 projects/clusteringress/pkg/api/custom/knative/cache.go
 delete mode 100644 projects/clusteringress/pkg/api/custom/knative/knative_clusteringress_client.go
 delete mode 100644 projects/clusteringress/pkg/api/external/knative/cluster_ingress.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/external/knative/cluster_ingress_client.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/external/knative/cluster_ingress_reconciler.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/v1/translator_event_loop.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/v1/translator_simple_event_loop.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/v1/translator_snapshot.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/v1/translator_snapshot_emitter.sk.go
 delete mode 100644 projects/clusteringress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go
 delete mode 100644 projects/clusteringress/pkg/translator/translate.go
 delete mode 100644 projects/clusteringress/pkg/translator/translate_test.go
 delete mode 100644 projects/clusteringress/pkg/translator/translator_suite_test.go
 delete mode 100644 projects/clusteringress/pkg/translator/translator_syncer.go
 delete mode 100644 projects/clusteringress/pkg/translator/translator_syncer_test.go
 delete mode 100644 projects/ingress/api/v1/ingress.proto
 delete mode 100644 projects/ingress/api/v1/service.proto
 delete mode 100644 projects/ingress/api/v1/solo-kit.json
 delete mode 100644 projects/ingress/cmd/Dockerfile
 delete mode 100644 projects/ingress/cmd/Dockerfile.distroless
 delete mode 100644 projects/ingress/cmd/main.go
 delete mode 100644 projects/ingress/pkg/api/ingress/resource_client.go
 delete mode 100644 projects/ingress/pkg/api/service/resource_client.go
 delete mode 100644 projects/ingress/pkg/api/service/service_client.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress.pb.clone.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress.pb.equal.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress.pb.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress.pb.hash.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress.pb.uniquehash.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress_client.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/ingress_reconciler.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/kube_service.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/kube_service_client.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/kube_service_reconciler.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/service.pb.clone.go
 delete mode 100644 projects/ingress/pkg/api/v1/service.pb.equal.go
 delete mode 100644 projects/ingress/pkg/api/v1/service.pb.go
 delete mode 100644 projects/ingress/pkg/api/v1/service.pb.hash.go
 delete mode 100644 projects/ingress/pkg/api/v1/service.pb.uniquehash.go
 delete mode 100644 projects/ingress/pkg/api/v1/status_event_loop.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/status_simple_event_loop.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/status_snapshot.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/status_snapshot_emitter.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/status_snapshot_simple_emitter.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/translator_event_loop.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/translator_simple_event_loop.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/translator_snapshot.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/translator_snapshot_emitter.sk.go
 delete mode 100644 projects/ingress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go
 delete mode 100644 projects/ingress/pkg/setup/opts.go
 delete mode 100644 projects/ingress/pkg/setup/setup.go
 delete mode 100644 projects/ingress/pkg/setup/setup_syncer.go
 delete mode 100644 projects/ingress/pkg/status/status_syncer.go
 delete mode 100644 projects/ingress/pkg/translator/translate.go
 delete mode 100644 projects/ingress/pkg/translator/translate_test.go
 delete mode 100644 projects/ingress/pkg/translator/translator_suite_test.go
 delete mode 100644 projects/ingress/pkg/translator/translator_syncer.go
 delete mode 100644 projects/knative/README.md
 delete mode 100644 projects/knative/api/external/knative/ingress.go
 delete mode 100644 projects/knative/api/external/knative/solo-kit.json
 delete mode 100644 projects/knative/api/v1/solo-kit.json
 delete mode 100644 projects/knative/pkg/api/custom/knative/cache.go
 delete mode 100644 projects/knative/pkg/api/custom/knative/knative_clusteringress_client.go
 delete mode 100644 projects/knative/pkg/api/external/knative/ingress.sk.go
 delete mode 100644 projects/knative/pkg/api/external/knative/ingress_client.sk.go
 delete mode 100644 projects/knative/pkg/api/external/knative/ingress_reconciler.sk.go
 delete mode 100644 projects/knative/pkg/api/v1/translator_event_loop.sk.go
 delete mode 100644 projects/knative/pkg/api/v1/translator_simple_event_loop.sk.go
 delete mode 100644 projects/knative/pkg/api/v1/translator_snapshot.sk.go
 delete mode 100644 projects/knative/pkg/api/v1/translator_snapshot_emitter.sk.go
 delete mode 100644 projects/knative/pkg/api/v1/translator_snapshot_simple_emitter.sk.go
 delete mode 100644 projects/knative/pkg/translator/translate.go
 delete mode 100644 projects/knative/pkg/translator/translate_test.go
 delete mode 100644 projects/knative/pkg/translator/translator_suite_test.go
 delete mode 100644 projects/knative/pkg/translator/translator_syncer.go
 delete mode 100644 projects/knative/pkg/translator/translator_syncer_test.go
 delete mode 100644 test/e2e/access_log_test.go
 delete mode 100644 test/e2e/grpcweb_test.go
 delete mode 100644 test/kube2e/gloo/resource_client_test.go

diff --git a/Makefile b/Makefile
index cf89f5fe0ae..df3d48f8686 100644
--- a/Makefile
+++ b/Makefile
@@ -419,74 +419,6 @@ distroless-with-utils-docker: distroless-docker $(DISTROLESS_OUTPUT_DIR)/Dockerf
 		--build-arg BASE_IMAGE=$(GLOO_DISTROLESS_BASE_IMAGE) \
 		-t $(GLOO_DISTROLESS_BASE_WITH_UTILS_IMAGE) $(QUAY_EXPIRATION_LABEL)
-#----------------------------------------------------------------------------------
-# Ingress
-#---------------------------------------------------------------------------------- - -INGRESS_DIR=projects/ingress -INGRESS_SOURCES=$(call get_sources,$(INGRESS_DIR)) -INGRESS_OUTPUT_DIR=$(OUTPUT_DIR)/$(INGRESS_DIR) - -$(INGRESS_OUTPUT_DIR)/ingress-linux-$(GOARCH): $(INGRESS_SOURCES) - $(GO_BUILD_FLAGS) GOOS=linux go build -ldflags=$(LDFLAGS) -gcflags=$(GCFLAGS) -o $@ $(INGRESS_DIR)/cmd/main.go - -.PHONY: ingress -ingress: $(INGRESS_OUTPUT_DIR)/ingress-linux-$(GOARCH) - -$(INGRESS_OUTPUT_DIR)/Dockerfile.ingress: $(INGRESS_DIR)/cmd/Dockerfile - cp $< $@ - -.PHONY: ingress-docker -ingress-docker: $(INGRESS_OUTPUT_DIR)/ingress-linux-$(GOARCH) $(INGRESS_OUTPUT_DIR)/Dockerfile.ingress - docker buildx build --load $(PLATFORM) $(INGRESS_OUTPUT_DIR) -f $(INGRESS_OUTPUT_DIR)/Dockerfile.ingress \ - --build-arg BASE_IMAGE=$(ALPINE_BASE_IMAGE) \ - --build-arg GOARCH=$(GOARCH) \ - -t $(IMAGE_REGISTRY)/ingress:$(VERSION) $(QUAY_EXPIRATION_LABEL) - -$(INGRESS_OUTPUT_DIR)/Dockerfile.ingress.distroless: $(INGRESS_DIR)/cmd/Dockerfile.distroless - cp $< $@ - -.PHONY: ingress-distroless-docker -ingress-distroless-docker: $(INGRESS_OUTPUT_DIR)/ingress-linux-$(GOARCH) $(INGRESS_OUTPUT_DIR)/Dockerfile.ingress.distroless distroless-docker - docker buildx build --load $(PLATFORM) $(INGRESS_OUTPUT_DIR) -f $(INGRESS_OUTPUT_DIR)/Dockerfile.ingress.distroless \ - --build-arg BASE_IMAGE=$(GLOO_DISTROLESS_BASE_IMAGE) \ - --build-arg GOARCH=$(GOARCH) \ - -t $(IMAGE_REGISTRY)/ingress:$(VERSION)-distroless $(QUAY_EXPIRATION_LABEL) - -#---------------------------------------------------------------------------------- -# Access Logger -#---------------------------------------------------------------------------------- - -ACCESS_LOG_DIR=projects/accesslogger -ACCESS_LOG_SOURCES=$(call get_sources,$(ACCESS_LOG_DIR)) -ACCESS_LOG_OUTPUT_DIR=$(OUTPUT_DIR)/$(ACCESS_LOG_DIR) - -$(ACCESS_LOG_OUTPUT_DIR)/access-logger-linux-$(GOARCH): $(ACCESS_LOG_SOURCES) - $(GO_BUILD_FLAGS) GOOS=linux go build -ldflags=$(LDFLAGS) -gcflags=$(GCFLAGS) -o $@ $(ACCESS_LOG_DIR)/cmd/main.go - -.PHONY: access-logger -access-logger: $(ACCESS_LOG_OUTPUT_DIR)/access-logger-linux-$(GOARCH) - -$(ACCESS_LOG_OUTPUT_DIR)/Dockerfile.access-logger: $(ACCESS_LOG_DIR)/cmd/Dockerfile - cp $< $@ - -.PHONY: access-logger-docker -access-logger-docker: $(ACCESS_LOG_OUTPUT_DIR)/access-logger-linux-$(GOARCH) $(ACCESS_LOG_OUTPUT_DIR)/Dockerfile.access-logger - docker buildx build --load $(PLATFORM) $(ACCESS_LOG_OUTPUT_DIR) -f $(ACCESS_LOG_OUTPUT_DIR)/Dockerfile.access-logger \ - --build-arg BASE_IMAGE=$(ALPINE_BASE_IMAGE) \ - --build-arg GOARCH=$(GOARCH) \ - -t $(IMAGE_REGISTRY)/access-logger:$(VERSION) $(QUAY_EXPIRATION_LABEL) - -$(ACCESS_LOG_OUTPUT_DIR)/Dockerfile.access-logger.distroless: $(ACCESS_LOG_DIR)/cmd/Dockerfile.distroless - cp $< $@ - -.PHONY: access-logger-distroless-docker -access-logger-distroless-docker: $(ACCESS_LOG_OUTPUT_DIR)/access-logger-linux-$(GOARCH) $(ACCESS_LOG_OUTPUT_DIR)/Dockerfile.access-logger.distroless distroless-docker - docker buildx build --load $(PLATFORM) $(ACCESS_LOG_OUTPUT_DIR) -f $(ACCESS_LOG_OUTPUT_DIR)/Dockerfile.access-logger.distroless \ - --build-arg BASE_IMAGE=$(GLOO_DISTROLESS_BASE_IMAGE) \ - --build-arg GOARCH=$(GOARCH) \ - -t $(IMAGE_REGISTRY)/access-logger:$(VERSION)-distroless $(QUAY_EXPIRATION_LABEL) - #---------------------------------------------------------------------------------- # Discovery #---------------------------------------------------------------------------------- @@ -881,8 +813,6 @@ docker-standard: 
discovery-docker docker-standard: gloo-envoy-wrapper-docker docker-standard: sds-docker docker-standard: certgen-docker -docker-standard: ingress-docker -docker-standard: access-logger-docker docker-standard: kubectl-docker .PHONY: docker-distroless @@ -892,8 +822,6 @@ docker-distroless: discovery-distroless-docker docker-distroless: gloo-envoy-wrapper-distroless-docker docker-distroless: sds-distroless-docker docker-distroless: certgen-distroless-docker -docker-distroless: ingress-distroless-docker -docker-distroless: access-logger-distroless-docker docker-distroless: kubectl-distroless-docker IMAGE_VARIANT ?= all @@ -917,7 +845,6 @@ docker-standard-push: docker-push-sds ifeq ($(MULTIARCH), ) docker-standard-push: docker-push-certgen endif -docker-standard-push: docker-push-ingress docker-standard-push: docker-push-access-logger ifeq ($(MULTIARCH), ) docker-standard-push: docker-push-kubectl @@ -931,7 +858,6 @@ docker-distroless-push: docker-push-sds-distroless ifeq ($(MULTIARCH), ) docker-distroless-push: docker-push-certgen-distroless endif -docker-distroless-push: docker-push-ingress-distroless docker-distroless-push: docker-push-access-logger-distroless ifeq ($(MULTIARCH), ) docker-distroless-push: docker-push-kubectl-distroless @@ -954,8 +880,6 @@ docker-standard-retag: docker-retag-discovery docker-standard-retag: docker-retag-gloo-envoy-wrapper docker-standard-retag: docker-retag-sds docker-standard-retag: docker-retag-certgen -docker-standard-retag: docker-retag-ingress -docker-standard-retag: docker-retag-access-logger docker-standard-retag: docker-retag-kubectl .PHONY: docker-distroless-retag @@ -964,8 +888,6 @@ docker-distroless-retag: docker-retag-discovery-distroless docker-distroless-retag: docker-retag-gloo-envoy-wrapper-distroless docker-distroless-retag: docker-retag-sds-distroless docker-distroless-retag: docker-retag-certgen-distroless -docker-distroless-retag: docker-retag-ingress-distroless -docker-distroless-retag: docker-retag-access-logger-distroless docker-distroless-retag: docker-retag-kubectl-distroless # Re-tag docker images previously pushed to the ORIGINAL_IMAGE_REGISTRY, @@ -1039,8 +961,6 @@ kind-build-and-load-standard: kind-build-and-load-discovery kind-build-and-load-standard: kind-build-and-load-gloo-envoy-wrapper kind-build-and-load-standard: kind-build-and-load-sds kind-build-and-load-standard: kind-build-and-load-certgen -kind-build-and-load-standard: kind-build-and-load-ingress -kind-build-and-load-standard: kind-build-and-load-access-logger kind-build-and-load-standard: kind-build-and-load-kubectl .PHONY: kind-build-and-load-distroless @@ -1049,8 +969,6 @@ kind-build-and-load-distroless: kind-build-and-load-discovery-distroless kind-build-and-load-distroless: kind-build-and-load-gloo-envoy-wrapper-distroless kind-build-and-load-distroless: kind-build-and-load-sds-distroless kind-build-and-load-distroless: kind-build-and-load-certgen-distroless -kind-build-and-load-distroless: kind-build-and-load-ingress-distroless -kind-build-and-load-distroless: kind-build-and-load-access-logger-distroless kind-build-and-load-distroless: kind-build-and-load-kubectl-distroless .PHONY: kind-build-and-load ## Use to build all images and load them into kind @@ -1072,8 +990,6 @@ kind-load-standard: kind-load-discovery kind-load-standard: kind-load-gloo-envoy-wrapper kind-load-standard: kind-load-sds kind-load-standard: kind-load-certgen -kind-load-standard: kind-load-ingress -kind-load-standard: kind-load-access-logger kind-load-standard: kind-load-kubectl .PHONY: 
kind-build-and-load-distroless @@ -1082,8 +998,6 @@ kind-load-distroless: kind-load-discovery-distroless kind-load-distroless: kind-load-gloo-envoy-wrapper-distroless kind-load-distroless: kind-load-sds-distroless kind-load-distroless: kind-load-certgen-distroless -kind-load-distroless: kind-load-ingress-distroless -kind-load-distroless: kind-load-access-logger-distroless kind-load-distroless: kind-load-kubectl-distroless .PHONY: kind-load ## Use to build all images and load them into kind @@ -1181,7 +1095,7 @@ scan-version: ## Scan all Gloo images with the tag matching {VERSION} env variab PATH=$(DEPSGOBIN):$$PATH GO111MODULE=on go run github.com/solo-io/go-utils/securityscanutils/cli scan-version -v \ -r $(IMAGE_REGISTRY)\ -t $(VERSION)\ - --images gloo,gloo-envoy-wrapper,discovery,ingress,sds,certgen,access-logger,kubectl + --images gloo,gloo-envoy-wrapper,discovery,sds,certgen,kubectl #---------------------------------------------------------------------------------- # Third Party License Management diff --git a/docs/content/static/content/osa_provided.md b/docs/content/static/content/osa_provided.md index 7ec52b5acaf..a3161fb1ed1 100644 --- a/docs/content/static/content/osa_provided.md +++ b/docs/content/static/content/osa_provided.md @@ -78,7 +78,6 @@ Name|Version|License [k8s.io/kube-openapi](https://k8s.io/kube-openapi)|v0.0.0-20240423202451-8948a665c108|Apache License 2.0 [k8s.io/kubectl](https://k8s.io/kubectl)|v0.31.1|Apache License 2.0 [k8s.io/utils](https://k8s.io/utils)|v0.0.0-20240711033017-18e509b52bc8|Apache License 2.0 -[knative.dev/networking](https://knative.dev/networking)|v0.0.0-20211210083629-bace06e98aee|Apache License 2.0 [knative.dev/pkg](https://knative.dev/pkg)|v0.0.0-20211206113427-18589ac7627e|Apache License 2.0 [sigs.k8s.io/controller-runtime](https://sigs.k8s.io/controller-runtime)|v0.19.1|Apache License 2.0 [sigs.k8s.io/controller-tools](https://sigs.k8s.io/controller-tools)|v0.16.5|Apache License 2.0 diff --git a/go.mod b/go.mod index d538671c50a..f13114443f3 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,6 @@ require ( k8s.io/component-base v0.31.2 k8s.io/kubectl v0.31.2 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 - knative.dev/networking v0.0.0-20211210083629-bace06e98aee knative.dev/pkg v0.0.0-20211206113427-18589ac7627e sigs.k8s.io/controller-runtime v0.19.1 sigs.k8s.io/gateway-api v1.2.0 diff --git a/go.sum b/go.sum index ab5bb1c2905..992c8110192 100644 --- a/go.sum +++ b/go.sum @@ -2641,7 +2641,6 @@ github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtIS github.com/rotisserie/eris v0.1.1/go.mod h1:2ik3CyJrzlOjGyDGrKfqZivSfmkhCS3ktE+T1mNzzLk= github.com/rotisserie/eris v0.5.4 h1:Il6IvLdAapsMhvuOahHWiBnl1G++Q0/L5UIkI5mARSk= github.com/rotisserie/eris v0.5.4/go.mod h1:Z/kgYTJiJtocxCbFfvRmO+QejApzG6zpyky9G1A4g9s= -github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.9.1/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= @@ -3425,7 +3424,6 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -4084,9 +4082,6 @@ k8s.io/metrics v0.31.1/go.mod h1:JuH1S9tJiH9q1VCY0yzSCawi7kzNLsDzlWDJN4xR+iA= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/hack v0.0.0-20211122162614-813559cefdda/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/hack v0.0.0-20211203062838-e11ac125e707/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/networking v0.0.0-20211210083629-bace06e98aee h1:tMn0wtCgn2X+i+JiRk0mYHGrzI/7dZOkCPogz3u3qTU= -knative.dev/networking v0.0.0-20211210083629-bace06e98aee/go.mod h1:5ARXhzfos0DMVbPwQ6bQC+vz/9t7smRYBwVrmVaSHrU= knative.dev/pkg v0.0.0-20211206113427-18589ac7627e h1:8hK7g4jz56ZtF3iQLsvrvR/hHnP5ZWLSxLnyYMbfxwY= knative.dev/pkg v0.0.0-20211206113427-18589ac7627e/go.mod h1:E6B4RTjZyxe55a0kxOlnEHEl71zuG7gghnqYvNBKwBw= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= diff --git a/install/helm/gloo/generate.go b/install/helm/gloo/generate.go index 8ef854b2686..4dca714e050 100644 --- a/install/helm/gloo/generate.go +++ b/install/helm/gloo/generate.go @@ -168,11 +168,6 @@ func generateValuesConfig(version, repositoryPrefix, globalPullPolicy string) (* cfg.Gateway.RolloutJob.Image.Tag = &version cfg.Gateway.CleanupJob.Image.Tag = &version - cfg.AccessLogger.Image.Tag = &version - - cfg.Ingress.Deployment.Image.Tag = &version - cfg.IngressProxy.Deployment.Image.Tag = &version - cfg.Settings.Integrations.Knative.Proxy.Image.Tag = &version cfg.Global.GlooMtls.Sds.Image.Tag = &version cfg.Global.GlooMtls.EnvoySidecar.Image.Tag = &version diff --git a/install/helm/gloo/generate/values.go b/install/helm/gloo/generate/values.go index 2aa2f193b5f..7365ec357cf 100644 --- a/install/helm/gloo/generate/values.go +++ b/install/helm/gloo/generate/values.go @@ -18,10 +18,7 @@ type Config struct { Discovery *Discovery `json:"discovery,omitempty"` Gateway *Gateway `json:"gateway,omitempty"` GatewayProxies map[string]GatewayProxy `json:"gatewayProxies,omitempty"` - Ingress *Ingress `json:"ingress,omitempty"` - IngressProxy *IngressProxy `json:"ingressProxy,omitempty"` K8s *K8s `json:"k8s,omitempty"` - AccessLogger *AccessLogger `json:"accessLogger,omitempty"` } type Global struct { @@ -133,7 +130,6 @@ type KubeResourceOverride struct { } type Integrations struct { - Knative *Knative `json:"knative,omitempty"` Consul *Consul `json:"consul,omitempty" desc:"Consul settings to inject into the consul client on startup"` ConsulUpstreamDiscovery *ConsulUpstreamDiscovery `json:"consulUpstreamDiscovery,omitempty" desc:"Settings for Gloo Edge's behavior when discovering consul services and creating upstreams for them."` } @@ -178,41 +174,6 @@ type Duration struct { Nanos *int32 `json:"nanos,omitempty" desc:"The value of this duration in nanoseconds."` } -type Knative struct { - Enabled *bool `json:"enabled,omitempty" desc:"enabled knative components"` - Version *string `json:"version,omitempty" desc:"the version of 
knative installed to the cluster. if using version < 0.8.0, Gloo Edge will use Knative's ClusterIngress API for configuration rather than the namespace-scoped Ingress"` - Proxy *KnativeProxy `json:"proxy,omitempty"` - RequireIngressClass *bool `json:"requireIngressClass,omitempty" desc:"only serve traffic for Knative Ingress objects with the annotation 'networking.knative.dev/ingress.class: gloo.ingress.networking.knative.dev'."` - ExtraKnativeInternalLabels map[string]string `json:"extraKnativeInternalLabels,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.labels data of the knative internal deployment."` - ExtraKnativeInternalAnnotations map[string]string `json:"extraKnativeInternalAnnotations,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.annotations data of the knative internal deployment."` - ExtraKnativeExternalLabels map[string]string `json:"extraKnativeExternalLabels,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.labels data of the knative external deployment."` - ExtraKnativeExternalAnnotations map[string]string `json:"extraKnativeExternalAnnotations,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.annotations data of the knative external deployment."` -} - -type KnativeProxy struct { - Image *Image `json:"image,omitempty"` - HttpPort *int `json:"httpPort,omitempty" desc:"HTTP port for the proxy"` - HttpsPort *int `json:"httpsPort,omitempty" desc:"HTTPS port for the proxy"` - Tracing *string `json:"tracing,omitempty" desc:"tracing configuration"` - RunAsUser *float64 `json:"runAsUser,omitempty" desc:"Explicitly set the user ID for the pod to run as. Default is 10101"` - LoopBackAddress *string `json:"loopBackAddress,omitempty" desc:"Name on which to bind the loop-back interface for this instance of Envoy. Defaults to 127.0.0.1, but other common values may be localhost or ::1"` - Stats *bool `json:"stats,omitempty" desc:"Controls whether or not Envoy stats are enabled"` - ExtraClusterIngressProxyLabels map[string]string `json:"extraClusterIngressProxyLabels,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.labels data of the cluster ingress proxy deployment."` - ExtraClusterIngressProxyAnnotations map[string]string `json:"extraClusterIngressProxyAnnotations,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.annotations data of the cluster ingress proxy deployment."` - Internal *KnativeProxyInternal `json:"internal,omitempty" desc:"kube resource overrides for knative internal proxy resources"` - *DeploymentSpec - *ServiceSpec - ConfigMap *KubeResourceOverride `json:"configMap,omitempty"` - Deployment *KubeResourceOverride `json:"deployment,omitempty"` - ContainerSecurityContext *SecurityContext `json:"containerSecurityContext,omitempty" desc:"securityContext for knative proxy containers. See [security context](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#securitycontext-v1-core) for details."` -} - -type KnativeProxyInternal struct { - Deployment *KubeResourceOverride `json:"deployment,omitempty"` - Service *KubeResourceOverride `json:"service,omitempty"` - ConfigMap *KubeResourceOverride `json:"configMap,omitempty"` -} - type Settings struct { WatchNamespaces []string `json:"watchNamespaces,omitempty" desc:"whitelist of namespaces for Gloo Edge to watch for services and CRDs. Empty list means all namespaces. 
If this and WatchNamespaceSelectors are specified, this takes precedence and WatchNamespaceSelectors is ignored"` WatchNamespaceSelectors interface{} `json:"watchNamespaceSelectors,omitempty" desc:"A list of Kubernetes selectors that specify the set of namespaces to restrict the namespaces that Gloo controllers take into consideration when watching for resources. Elements in the list are disjunctive (OR semantics), i.e. a namespace will be included if it matches any selector. An empty list means all namespaces. If this and WatchNamespaces are specified, WatchNamespaces takes precedence and this is ignored"` @@ -741,63 +702,6 @@ type Failover struct { *KubeResourceOverride } -type AccessLogger struct { - Image *Image `json:"image,omitempty"` - Port *uint `json:"port,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - Enabled *bool `json:"enabled,omitempty"` - Stats *Stats `json:"stats,omitempty" desc:"overrides for prometheus stats published by the access logging pod"` - RunAsUser *float64 `json:"runAsUser,omitempty" desc:"Explicitly set the user ID for the processes in the container to run as. Default is 10101."` - FsGroup *float64 `json:"fsGroup,omitempty" desc:"Explicitly set the group ID for volume ownership. Default is 10101"` - ExtraAccessLoggerLabels map[string]string `json:"extraAccessLoggerLabels,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.labels data of the access logger deployment."` - ExtraAccessLoggerAnnotations map[string]string `json:"extraAccessLoggerAnnotations,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.annotations data of the access logger deployment."` - Service *KubeResourceOverride `json:"service,omitempty"` - Deployment *KubeResourceOverride `json:"deployment,omitempty"` - AccessLoggerContainerSecurityContext *SecurityContext `json:"accessLoggerContainerSecurityContext,omitempty" desc:"Security context for the access logger deployment. If this is defined it supercedes any values set in FloatingUserId or RunAsUser. See [security context](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#securitycontext-v1-core) for details.""` - *DeploymentSpec -} - -type Ingress struct { - Enabled *bool `json:"enabled,omitempty"` - Deployment *IngressDeployment `json:"deployment,omitempty"` - RequireIngressClass *bool `json:"requireIngressClass,omitempty" desc:"only serve traffic for Ingress objects with the Ingress Class annotation 'kubernetes.io/ingress.class'. By default the annotation value must be set to 'gloo', however this can be overridden via customIngressClass."` - CustomIngress *bool `json:"customIngressClass,omitempty" desc:"Only relevant when requireIngressClass is set to true. Setting this value will cause the Gloo Edge Ingress Controller to process only those Ingress objects which have their ingress class set to this value (e.g. 'kubernetes.io/ingress.class=SOMEVALUE')."` -} - -type IngressDeployment struct { - Image *Image `json:"image,omitempty"` - RunAsUser *float64 `json:"runAsUser,omitempty" desc:"Explicitly set the user ID for the processes in the container to run as. 
Default is 10101."` - FloatingUserId *bool `json:"floatingUserId,omitempty" desc:"If true, allows the cluster to dynamically assign a user ID for the processes running in the container."` - ExtraIngressLabels map[string]string `json:"extraIngressLabels,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.labels data of the ingress deployment."` - ExtraIngressAnnotations map[string]string `json:"extraIngressAnnotations,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.annotations data of the ingress deployment."` - Stats *bool `json:"stats,omitempty" desc:"Controls whether or not Envoy stats are enabled"` - IngressContainerSecurityContext *SecurityContext `json:"ingressContainerSecurityContext,omitempty" desc:"Security context for the ingress deployment. If this is defined it supercedes any values set in FloatingUserId or RunAsUser. See [security context](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#securitycontext-v1-core) for details."` - *DeploymentSpec -} - -type IngressProxy struct { - Deployment *IngressProxyDeployment `json:"deployment,omitempty"` - ConfigMap *ConfigMap `json:"configMap,omitempty"` - Tracing *string `json:"tracing,omitempty"` - LoopBackAddress *string `json:"loopBackAddress,omitempty" desc:"Name on which to bind the loop-back interface for this instance of Envoy. Defaults to 127.0.0.1, but other common values may be localhost or ::1"` - Label *string `json:"label,omitempty" desc:"Value for label gloo. Use a unique value to use several ingress proxy instances in the same cluster. Default is ingress-proxy"` - *ServiceSpec -} - -type IngressProxyDeployment struct { - Image *Image `json:"image,omitempty"` - HttpPort *int `json:"httpPort,omitempty" desc:"HTTP port for the ingress container"` - HttpsPort *int `json:"httpsPort,omitempty" desc:"HTTPS port for the ingress container"` - ExtraPorts []interface{} `json:"extraPorts,omitempty"` - ExtraAnnotations map[string]string `json:"extraAnnotations,omitempty"` - FloatingUserId *bool `json:"floatingUserId,omitempty" desc:"If true, allows the cluster to dynamically assign a user ID for the processes running in the container."` - RunAsUser *float64 `json:"runAsUser,omitempty" desc:"Explicitly set the user ID for the pod to run as. Default is 10101"` - ExtraIngressProxyLabels map[string]string `json:"extraIngressProxyLabels,omitempty" desc:"Optional extra key-value pairs to add to the spec.template.metadata.labels data of the ingress proxy deployment."` - Stats *bool `json:"stats,omitempty" desc:"Controls whether or not Envoy stats are enabled"` - IngressProxyContainerSecurityContext *SecurityContext `json:"ingressProxyContainerSecurityContext,omitempty" desc:"Security context for the ingress proxy deployment. If this is defined it supercedes any values set in FloatingUserId or RunAsUser. 
See [security context](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#securitycontext-v1-core) for details."` - *DeploymentSpec -} - type ServiceSpec struct { Service *Service `json:"service,omitempty" desc:"K8s service configuration"` } diff --git a/install/helm/gloo/templates/10-ingress-deployment.yaml b/install/helm/gloo/templates/10-ingress-deployment.yaml deleted file mode 100644 index 49056812bac..00000000000 --- a/install/helm/gloo/templates/10-ingress-deployment.yaml +++ /dev/null @@ -1,98 +0,0 @@ -{{- define "ingress.deploymentSpec"}} -{{- if or (.Values.ingress.enabled) (.Values.settings.integrations.knative.enabled) }} -{{- $image := .Values.ingress.deployment.image }} -{{- if .Values.global }} -{{- $image = merge .Values.ingress.deployment.image .Values.global.image }} -{{- end }} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: ingress - name: ingress - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.ingress.deployment.replicas }} - selector: - matchLabels: - gloo: ingress - template: - metadata: - labels: - gloo: ingress - {{- if .Values.ingress.deployment.extraIngressLabels }} - {{- range $key, $value := .Values.ingress.deployment.extraIngressLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- if .Values.global.istioIntegration.disableAutoinjection }} - sidecar.istio.io/inject: "false" - {{- end }} - annotations: - {{- if .Values.ingress.deployment.extraIngressAnnotations }} - {{- range $key, $value := .Values.ingress.deployment.extraIngressAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- include "gloo.pullSecret" $image | nindent 6 -}} - {{- include "gloo.podSpecStandardFields" .Values.ingress.deployment | nindent 6 -}} - securityContext: - runAsNonRoot: true - {{- if not .Values.ingress.deployment.floatingUserId }} - runAsUser: {{ printf "%.0f" (float64 .Values.ingress.deployment.runAsUser) -}} - {{- end }} - containers: - - image: {{template "gloo.image" $image}} - imagePullPolicy: {{ $image.pullPolicy }} - name: ingress - {{- include "gloo.containerSecurityContext" (dict "values" .Values.ingress.deployment.ingressContainerSecurityContext "podSecurityStandards" .Values.global.podSecurityStandards "indent" 8 "globalSec" .Values.global.securitySettings) }} -{{- if .Values.ingress.deployment.resources }} - resources: -{{ toYaml .Values.ingress.deployment.resources | indent 10}} -{{- end}} - env: -{{- if .Values.ingress.deployment.customEnv }} -{{ toYaml .Values.ingress.deployment.customEnv | indent 8 }} -{{- end }} - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -{{- if .Values.settings.integrations.knative.enabled }} - - name: "ENABLE_KNATIVE_INGRESS" - value: "true" -{{- end }} -{{- if .Values.settings.integrations.knative.version }} - - name: "KNATIVE_VERSION" - value: "{{.Values.settings.integrations.knative.version}}" -{{- end }} - -{{- if not (.Values.ingress.enabled) }} - - name: "DISABLE_KUBE_INGRESS" - value: "true" -{{- else }} - - {{- if or .Values.ingress.requireIngressClass .Values.settings.integrations.knative.requireIngressClass }} - - name: "REQUIRE_INGRESS_CLASS" - value: "true" - {{- end }} - - {{- if and .Values.ingress.customIngressClass }} - - name: "CUSTOM_INGRESS_CLASS" - value: "{{ .Values.ingress.customIngressClass }}" - {{- end }} -{{- end }} -{{- end }} {{/* if or (.Values.ingress.enabled) (.Values.settings.integrations.knative.enabled) */}} -{{- end }} 
{{/* define "ingress.deploymentSpec" */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.ingress.deployment -}} -{{- if .Values.ingress.deployment.kubeResourceOverride -}} -{{- $kubeResourceOverride = .Values.ingress.deployment.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "ingress.deploymentSpec") -}} - diff --git a/install/helm/gloo/templates/11-ingress-proxy-deployment.yaml b/install/helm/gloo/templates/11-ingress-proxy-deployment.yaml deleted file mode 100644 index 03ffbe571a1..00000000000 --- a/install/helm/gloo/templates/11-ingress-proxy-deployment.yaml +++ /dev/null @@ -1,101 +0,0 @@ -{{- define "ingressProxy.deploymentSpec"}} -{{- if .Values.ingress.enabled }} -{{- $image := .Values.ingressProxy.deployment.image }} -{{- if .Values.global }} -{{- $image = merge .Values.ingressProxy.deployment.image .Values.global.image }} -{{- end }} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: {{ .Values.ingressProxy.label }} - name: ingress-proxy - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.ingressProxy.deployment.replicas }} - selector: - matchLabels: - gloo: ingress-proxy - template: - metadata: - labels: - gloo: {{ .Values.ingressProxy.label }} - {{- if .Values.ingressProxy.deployment.extraIngressProxyLabels }} - {{- range $key, $value := .Values.ingressProxy.deployment.extraIngressProxyLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- if .Values.global.istioIntegration.disableAutoinjection }} - sidecar.istio.io/inject: "false" - {{- end }} - annotations: - checksum/ingress-envoy-config: {{ include (print .Template.BasePath "/12-ingress-proxy-configmap.yaml") . 
| sha256sum }} - {{- if .Values.ingressProxy.deployment.extraAnnotations }} - {{- range $key, $value := .Values.ingressProxy.deployment.extraAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- include "gloo.pullSecret" $image | nindent 6 -}} - containers: - - args: ["--disable-hot-restart"] - env: -{{- if .Values.ingressProxy.deployment.customEnv }} -{{ toYaml .Values.ingressProxy.deployment.customEnv | indent 8 }} -{{- end }} - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: INGRESS_PROXY_LABEL - value: {{ .Values.ingressProxy.label }} - image: {{template "gloo.image" $image}} - imagePullPolicy: {{ $image.pullPolicy }} - name: ingress-proxy -{{- if .Values.ingressProxy.deployment.resources }} - resources: -{{ toYaml .Values.ingressProxy.deployment.resources | indent 10}} -{{- end}} - {{- $capabilities := dict "drop" (list "ALL") "add" (list "NET_BIND_SERVICE") -}} - {{- $securityDefaults := dict - "readOnlyRootFilesystem" true - "allowPrivilegeEscalation" false - "capabilities" $capabilities - }} - {{- if .Values.ingressProxy.deployment.runAsUser -}} - {{- $_ := set $securityDefaults "runAsUser" .Values.ingressProxy.deployment.runAsUser }} - {{- end -}} - {{- include "gloo.containerSecurityContext" (dict "values" .Values.ingressProxy.deployment.ingressProxyContainerSecurityContext "defaults" $securityDefaults "podSecurityStandards" .Values.global.podSecurityStandards "indent" 8 "globalSec" .Values.global.securitySettings) }} - ports: - - containerPort: {{ .Values.ingressProxy.deployment.httpPort }} - name: http - protocol: TCP - - containerPort: {{ .Values.ingressProxy.deployment.httpsPort }} - name: https - protocol: TCP -{{- with .Values.ingressProxy.deployment.extraPorts }} -{{toYaml . | indent 8}}{{- end }} - volumeMounts: - - mountPath: /etc/envoy - name: envoy-config - volumes: - - configMap: - name: ingress-envoy-config - name: envoy-config - {{- include "gloo.podSpecStandardFields" .Values.ingress.deployment | nindent 6 -}} -{{- end }} {{/* if .Values.ingress.enabled */}} -{{- end }} {{/* define "ingressProxy.deploymentSpec "*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.ingressProxy -}} -{{- if .Values.ingressProxy.deployment -}} -{{- $kubeResourceOverride = .Values.ingressProxy.deployment.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "ingressProxy.deploymentSpec") -}} diff --git a/install/helm/gloo/templates/12-ingress-proxy-configmap.yaml b/install/helm/gloo/templates/12-ingress-proxy-configmap.yaml deleted file mode 100644 index 60e7feb6728..00000000000 --- a/install/helm/gloo/templates/12-ingress-proxy-configmap.yaml +++ /dev/null @@ -1,156 +0,0 @@ -{{- define "ingressProxy.configMapSpec"}} -{{- if .Values.ingress.enabled }} -# configmap -apiVersion: v1 -kind: ConfigMap -metadata: - name: ingress-envoy-config - namespace: {{ .Release.Namespace }} - labels: -{{ include "gloo.labels" . 
| indent 4}} - gloo: {{ .Values.ingressProxy.label }} -data: -{{ if (empty .Values.ingressProxy.configMap.data) }} - envoy.yaml: | - layered_runtime: - layers: - - name: static_layer - static_layer: - overload: - global_downstream_max_connections: 250000 - - name: admin_layer - admin_layer: {} - node: - cluster: ingress - id: "{{ `{{.PodName}}.{{.PodNamespace}}` }}" - metadata: - # role's value is the key for the in-memory xds cache (projects/gloo/pkg/xds/envoy.go) - role: "{{ `{{.PodNamespace}}` }}~ingress-proxy" - static_resources: - clusters: - - name: xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ .Values.gloo.deployment.xdsPort }} - http2_protocol_options: {} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - - name: rest_xds_cluster - alt_stat_name: rest_xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: rest_xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ $.Values.gloo.deployment.restXdsPort }} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - respect_dns_ttl: true -{{- if .Values.ingressProxy.deployment.stats }} - - name: admin_port_cluster - connect_timeout: 5.000s - type: STATIC - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: admin_port_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 19000 - - listeners: - - name: prometheus_listener - address: - socket_address: - address: 0.0.0.0 - port_value: 8081 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - codec_type: AUTO - stat_prefix: prometheus - route_config: - name: prometheus_route - virtual_hosts: - - name: prometheus_host - domains: - - "*" - routes: - - match: - prefix: "/metrics" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.routePrefixRewrite }} - cluster: admin_port_cluster - {{- if .Values.global.glooStats.enableStatsRoute}} - - match: - prefix: "/stats" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.statsPrefixRewrite }} - cluster: admin_port_cluster - {{- end }} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router -{{- if .Values.ingressProxy.tracing }} - tracing: -{{ toYaml .Values.ingressProxy.tracing | indent 22}} -{{- end}} {{/* if .Values.ingressProxy.tracing */}} -{{- end}} - - dynamic_resources: - ads_config: - transport_api_version: V3 - api_type: GRPC - rate_limit_settings: {} - grpc_services: - - envoy_grpc: {cluster_name: xds_cluster} - cds_config: - resource_api_version: V3 - ads: {} - lds_config: - resource_api_version: V3 - ads: {} - admin: - access_log_path: /dev/null - address: - socket_address: - address: {{ .Values.ingressProxy.loopBackAddress }} - port_value: 19000 -{{- else}}{{ toYaml .Values.ingressProxy.configMap.data | indent 2}}{{- end}} -{{- end }} -{{- end}} {{/* define "ingressProxy.configMapSpec "*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.ingressProxy -}} -{{- if .Values.ingressProxy.configMap -}} -{{- 
$kubeResourceOverride = .Values.ingressProxy.configMap.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "ingressProxy.configMapSpec") -}} - diff --git a/install/helm/gloo/templates/13-ingress-proxy-service.yaml b/install/helm/gloo/templates/13-ingress-proxy-service.yaml deleted file mode 100644 index e5bfe5cebc9..00000000000 --- a/install/helm/gloo/templates/13-ingress-proxy-service.yaml +++ /dev/null @@ -1,45 +0,0 @@ -{{- define "ingressProxy.serviceSpec" }} -{{- if .Values.ingress.enabled }} -apiVersion: v1 -kind: Service -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: {{ .Values.ingressProxy.label }} - name: ingress-proxy - namespace: {{ .Release.Namespace }} -{{- if .Values.ingressProxy.service }} -{{- if .Values.ingressProxy.service.extraAnnotations }} - annotations: - {{- range $key, $value := .Values.ingressProxy.service.extraAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} -{{- end }} -{{- end }} -spec: - ports: - - port: {{ .Values.ingressProxy.service.httpPort }} - targetPort: {{ .Values.ingressProxy.deployment.httpPort }} - protocol: TCP - name: http - - port: {{ .Values.ingressProxy.service.httpsPort }} - targetPort: {{ .Values.ingressProxy.deployment.httpsPort }} - protocol: TCP - name: https - selector: - gloo: ingress-proxy - type: {{ .Values.ingressProxy.service.type }} - {{- if and (eq .Values.ingressProxy.service.type "LoadBalancer") .Values.ingressProxy.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.ingressProxy.service.loadBalancerIP }} - {{- end }} -{{- end }} {{/* if .Values.ingress.enabled */}} -{{- end }} {{/* define ingressProxy.serviceSpec */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.ingressProxy -}} -{{- if .Values.ingressProxy.service -}} -{{- $kubeResourceOverride = .Values.ingressProxy.service.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "ingressProxy.serviceSpec") -}} diff --git a/install/helm/gloo/templates/14-clusteringress-proxy-deployment.yaml b/install/helm/gloo/templates/14-clusteringress-proxy-deployment.yaml deleted file mode 100644 index 525cf724871..00000000000 --- a/install/helm/gloo/templates/14-clusteringress-proxy-deployment.yaml +++ /dev/null @@ -1,94 +0,0 @@ -{{- define "clusterIngressProxy.deploymentSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare "< 0.8.0" .Values.settings.integrations.knative.version ) }} -{{- $image := .Values.settings.integrations.knative.proxy.image }} -{{- if .Values.global }} -{{- $image = merge .Values.settings.integrations.knative.proxy.image .Values.global.image }} -{{- end }} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: -{{ include "gloo.labels" . 
| indent 4}} - gloo: clusteringress-proxy - name: clusteringress-proxy - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.settings.integrations.knative.proxy.replicas }} - selector: - matchLabels: - gloo: clusteringress-proxy - template: - metadata: - labels: - gloo: clusteringress-proxy - {{- if .Values.settings.integrations.knative.proxy.extraClusterIngressProxyLabels }} - {{- range $key, $value := .Values.settings.integrations.knative.proxy.extraClusterIngressProxyLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- if .Values.global.istioIntegration.disableAutoinjection }} - sidecar.istio.io/inject: "false" - {{- end }} - annotations: - checksum/clusteringress-envoy-config: {{ include (print .Template.BasePath "/15-clusteringress-proxy-configmap.yaml") . | sha256sum }} - {{- if .Values.settings.integrations.knative.proxy.extraClusterIngressProxyAnnotations }} - {{- range $key, $value := .Values.settings.integrations.knative.proxy.extraClusterIngressProxyAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- include "gloo.pullSecret" $image | nindent 6 -}} - {{- include "gloo.podSpecStandardFields" .Values.settings.integrations.knative.proxy | nindent 6 -}} - containers: - - args: ["--disable-hot-restart"] - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: {{template "gloo.image" $image}} - imagePullPolicy: {{ $image.pullPolicy }} - name: clusteringress-proxy -{{- if .Values.settings.integrations.knative.proxy.resources }} - resources: -{{ toYaml .Values.settings.integrations.knative.proxy.resources | indent 10}} -{{- end}} - {{- $capabilities := dict "drop" (list "ALL") "add" (list "NET_BIND_SERVICE") -}} - {{- $securityDefaults := dict - "readOnlyRootFilesystem" true - "allowPrivilegeEscalation" false - "capabilities" $capabilities - }} - {{- include "gloo.containerSecurityContext" (dict "values" .Values.settings.integrations.knative.proxy.containerSecurityContext "defaults" $securityDefaults "podSecurityStandards" .Values.global.podSecurityStandards "indent" 8 "globalSec" .Values.global.securitySettings) }} - ports: - - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }} - name: http - protocol: TCP - - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }} - name: https - protocol: TCP - volumeMounts: - - mountPath: /etc/envoy - name: envoy-config - volumes: - - configMap: - name: clusteringress-envoy-config - name: envoy-config - -{{- end }} {{/* if (semverCompare "< 0.8.0" .Values.settings.integrations.knative.version ) */}} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/* define clusterIngressProxy.deploymentSpec */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.deployment -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.deployment.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . 
$kubeResourceOverride "clusterIngressProxy.deploymentSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/15-clusteringress-proxy-configmap.yaml b/install/helm/gloo/templates/15-clusteringress-proxy-configmap.yaml deleted file mode 100644 index 29c43e7f223..00000000000 --- a/install/helm/gloo/templates/15-clusteringress-proxy-configmap.yaml +++ /dev/null @@ -1,157 +0,0 @@ -{{- define "clusterIngressProxy.configMapSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare "< 0.8.0" .Values.settings.integrations.knative.version ) }} - -# configmap -apiVersion: v1 -kind: ConfigMap -metadata: - name: clusteringress-envoy-config - namespace: {{ .Release.Namespace }} - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: clusteringress-proxy -data: - envoy.yaml: | - layered_runtime: - layers: - - name: static_layer - static_layer: - overload: - global_downstream_max_connections: 250000 - - name: admin_layer - admin_layer: {} - node: - cluster: clusteringress - id: "{{ `{{.PodName}}.{{.PodNamespace}}` }}" - metadata: - # role's value is the key for the in-memory xds cache (projects/gloo/pkg/xds/envoy.go) - role: "{{ `{{.PodNamespace}}` }}~clusteringress-proxy" - static_resources: - clusters: - - name: xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ .Values.gloo.deployment.xdsPort }} - http2_protocol_options: {} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - - name: rest_xds_cluster - alt_stat_name: rest_xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: rest_xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ $.Values.gloo.deployment.restXdsPort }} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - respect_dns_ttl: true -{{- if .Values.settings.integrations.knative.proxy.stats }} - - name: admin_port_cluster - connect_timeout: 5.000s - type: STATIC - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: admin_port_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 19000 - - listeners: - - name: prometheus_listener - address: - socket_address: - address: 0.0.0.0 - port_value: 8081 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - codec_type: AUTO - stat_prefix: prometheus - route_config: - name: prometheus_route - virtual_hosts: - - name: prometheus_host - domains: - - "*" - routes: - - match: - prefix: "/metrics" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.routePrefixRewrite }} - cluster: admin_port_cluster - {{- if .Values.global.glooStats.enableStatsRoute}} - - match: - prefix: "/stats" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.statsPrefixRewrite }} - cluster: admin_port_cluster - {{- end }} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router -{{- if .Values.settings.integrations.knative.proxy.tracing }} - tracing: -{{ toYaml .Values.settings.integrations.knative.proxy.tracing | indent 22}} -{{- end}} {{/* if 
.Values.settings.integrations.knative.proxy.tracing */}} -{{- end}} - - - dynamic_resources: - ads_config: - transport_api_version: V3 - api_type: GRPC - rate_limit_settings: {} - grpc_services: - - envoy_grpc: {cluster_name: xds_cluster} - cds_config: - resource_api_version: V3 - ads: {} - lds_config: - resource_api_version: V3 - ads: {} - admin: - access_log_path: /dev/null - address: - socket_address: - address: {{ .Values.settings.integrations.knative.proxy.loopBackAddress }} - port_value: 19000 -{{- end }} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/*define "clusterIngressProxy.configMapSpec"*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.configMap -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.configMap.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "clusterIngressProxy.configMapSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/16-clusteringress-proxy-service.yaml b/install/helm/gloo/templates/16-clusteringress-proxy-service.yaml deleted file mode 100644 index 4e6a922bf66..00000000000 --- a/install/helm/gloo/templates/16-clusteringress-proxy-service.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- define "clusterIngressProxy.serviceSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare "< 0.8.0" .Values.settings.integrations.knative.version ) }} -apiVersion: v1 -kind: Service -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: clusteringress-proxy - name: clusteringress-proxy - namespace: {{ .Release.Namespace }} -spec: - ports: - - port: {{ .Values.settings.integrations.knative.proxy.service.httpPort }} - targetPort: {{ .Values.settings.integrations.knative.proxy.httpPort }} - protocol: TCP - name: http - - port: {{ .Values.settings.integrations.knative.proxy.service.httpsPort }} - targetPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }} - protocol: TCP - name: https - selector: - gloo: clusteringress-proxy - type: {{ .Values.settings.integrations.knative.proxy.service.type }} -{{- end }} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/*define "clusterIngressProxy.serviceSpec"*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.service -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.service.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "clusterIngressProxy.serviceSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/21-namespace-clusterrole-ingress.yaml b/install/helm/gloo/templates/21-namespace-clusterrole-ingress.yaml deleted file mode 100644 index a5519055347..00000000000 --- a/install/helm/gloo/templates/21-namespace-clusterrole-ingress.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if .Values.global.glooRbac.create }} - -{{- if .Values.ingress.enabled }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: gloo-role-ingress{{ include "gloo.rbacNameSuffix" . }} - labels: -{{ include "gloo.labels" . 
| indent 4}} - gloo: rbac -rules: -- apiGroups: [""] - resources: ["pods", "services", "secrets", "endpoints", "configmaps"] - verbs: ["*"] -- apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create"] -- apiGroups: ["gloo.solo.io", "enterprise.gloo.solo.io", "gateway.solo.io"] - resources: ["settings", "upstreams", "upstreamgroups", "proxies", "authconfigs", "virtualservices", "routetables", "virtualhostoptions", "routeoptions", "gateways", "httpgateways", "tcpgateways"] - verbs: ["*"] -- apiGroups: ["ratelimit.solo.io"] - resources: ["ratelimitconfigs", "ratelimitconfigs/status"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["graphql.gloo.solo.io"] - resources: ["graphqlapis", "graphqlapis/status"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["networking.k8s.io", ""] - resources: ["ingresses", "ingresses/status"] - verbs: ["*"] -{{- end -}} - -{{- end -}} diff --git a/install/helm/gloo/templates/22-namespace-clusterrole-knative.yaml b/install/helm/gloo/templates/22-namespace-clusterrole-knative.yaml deleted file mode 100644 index c80777301fc..00000000000 --- a/install/helm/gloo/templates/22-namespace-clusterrole-knative.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{- if .Values.global.glooRbac.create }} - -{{- if .Values.settings.integrations.knative.enabled }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: gloo-role-knative{{ include "gloo.rbacNameSuffix" . }} - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: rbac -rules: -- apiGroups: [""] - resources: ["pods", "services", "secrets", "endpoints", "configmaps"] - verbs: ["*"] -- apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create"] -- apiGroups: ["gloo.solo.io", "enterprise.gloo.solo.io"] - resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices", "routetables", "authconfigs"] - verbs: ["*"] -- apiGroups: ["ratelimit.solo.io"] - resources: ["ratelimitconfigs","ratelimitconfigs/status"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["graphql.gloo.solo.io"] - resources: ["graphqlapis", "graphqlapis/status"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["networking.internal.knative.dev"] - resources: ["clusteringresses"] - verbs: ["get", "list", "watch"] -- apiGroups: ["networking.internal.knative.dev"] - resources: ["clusteringresses/status"] - verbs: ["update"] -- apiGroups: ["networking.internal.knative.dev"] - resources: ["ingresses"] - verbs: ["get", "list", "watch"] -- apiGroups: ["networking.internal.knative.dev"] - resources: ["ingresses/status"] - verbs: ["update"] -{{- end -}} - -{{- end -}} diff --git a/install/helm/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml b/install/helm/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml deleted file mode 100644 index 83ac5841777..00000000000 --- a/install/helm/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.global.glooRbac.create }} - -{{- if .Values.ingress.enabled }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: gloo-role-binding-ingress{{ include "gloo.rbacNameSuffix" . }} - labels: -{{ include "gloo.labels" . 
| indent 4}} - gloo: rbac -subjects: -- kind: ServiceAccount - name: default - namespace: {{ .Release.Namespace }} -- kind: ServiceAccount - name: discovery - namespace: {{ .Release.Namespace }} -- kind: ServiceAccount - name: gloo - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: gloo-role-ingress{{ include "gloo.rbacNameSuffix" . }} - apiGroup: rbac.authorization.k8s.io - -{{- end -}} - -{{- end -}} diff --git a/install/helm/gloo/templates/26-knative-external-proxy-deployment.yaml b/install/helm/gloo/templates/26-knative-external-proxy-deployment.yaml deleted file mode 100644 index 7b6bf254d6f..00000000000 --- a/install/helm/gloo/templates/26-knative-external-proxy-deployment.yaml +++ /dev/null @@ -1,97 +0,0 @@ -{{- define "knativeExternalProxy.deploymentSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) }} -{{- $image := .Values.settings.integrations.knative.proxy.image }} -{{- if .Values.global }} -{{- $image = merge .Values.settings.integrations.knative.proxy.image .Values.global.image }} -{{- end }} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: knative-external-proxy - name: knative-external-proxy - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.settings.integrations.knative.proxy.replicas }} - selector: - matchLabels: - gloo: knative-external-proxy - template: - metadata: - labels: - gloo: knative-external-proxy - {{- if .Values.settings.integrations.knative.extraKnativeExternalLabels }} - {{- range $key, $value := .Values.settings.integrations.knative.extraKnativeExternalLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- if .Values.global.istioIntegration.disableAutoinjection }} - sidecar.istio.io/inject: "false" - {{- end }} - annotations: - checksum/knative-external-proxy-config: {{ include (print .Template.BasePath "/27-knative-external-proxy-configmap.yaml") . 
| sha256sum }} - {{- if .Values.settings.integrations.knative.extraKnativeExternalAnnotations }} - {{- range $key, $value := .Values.settings.integrations.knative.extraKnativeExternalAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- include "gloo.pullSecret" $image | nindent 6 -}} - {{- include "gloo.podSpecStandardFields" .Values.settings.integrations.knative.proxy | nindent 6 -}} - containers: - - args: ["--disable-hot-restart"] - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: {{ template "gloo.image" $image }} - imagePullPolicy: {{ $image.pullPolicy }} - name: knative-external-proxy -{{- if .Values.settings.integrations.knative.proxy.resources }} - resources: -{{ toYaml .Values.settings.integrations.knative.proxy.resources | indent 10}} -{{- end}} - {{- $capabilities := dict "drop" (list "ALL") "add" (list "NET_BIND_SERVICE") -}} - {{- $securityDefaults := dict - "readOnlyRootFilesystem" true - "allowPrivilegeEscalation" false - "capabilities" $capabilities - }} - {{- if .Values.settings.integrations.knative.proxy.runAsUser -}} - {{- $_ := set $securityDefaults "runAsUser" .Values.settings.integrations.knative.proxy.runAsUser }} - {{- end -}} - {{- include "gloo.containerSecurityContext" (dict "values" .Values.settings.integrations.knative.proxy.containerSecurityContext "defaults" $securityDefaults "podSecurityStandards" .Values.global.podSecurityStandards "indent" 8 "globalSec" .Values.global.securitySettings) }} - ports: - - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }} - name: http - protocol: TCP - - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }} - name: https - protocol: TCP - volumeMounts: - - mountPath: /etc/envoy - name: envoy-config - volumes: - - configMap: - name: knative-external-proxy-config - name: envoy-config - -{{- end }} -{{- end }} -{{- end }} {{/* define knativeExternalProxy.deploymentSpec */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.deployment -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.deployment.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "knativeExternalProxy.deploymentSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/27-knative-external-proxy-configmap.yaml b/install/helm/gloo/templates/27-knative-external-proxy-configmap.yaml deleted file mode 100644 index bfaba453491..00000000000 --- a/install/helm/gloo/templates/27-knative-external-proxy-configmap.yaml +++ /dev/null @@ -1,156 +0,0 @@ -{{- define "knativeExternalProxy.configMapSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) }} - -# configmap -apiVersion: v1 -kind: ConfigMap -metadata: - name: knative-external-proxy-config - namespace: {{ .Release.Namespace }} - labels: -{{ include "gloo.labels" . 
| indent 4}} - gloo: knative-external-proxy -data: - envoy.yaml: | - layered_runtime: - layers: - - name: static_layer - static_layer: - overload: - global_downstream_max_connections: 250000 - - name: admin_layer - admin_layer: {} - node: - cluster: knative - id: "{{ `{{.PodName}}.{{.PodNamespace}}` }}" - metadata: - # role's value is the key for the in-memory xds cache (projects/gloo/pkg/xds/envoy.go) - role: "{{ `{{.PodNamespace}}` }}~knative-external-proxy" - static_resources: - clusters: - - name: xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ .Values.gloo.deployment.xdsPort }} - http2_protocol_options: {} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - - name: rest_xds_cluster - alt_stat_name: rest_xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: rest_xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ $.Values.gloo.deployment.restXdsPort }} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - respect_dns_ttl: true -{{- if .Values.settings.integrations.knative.proxy.stats }} - - name: admin_port_cluster - connect_timeout: 5.000s - type: STATIC - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: admin_port_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 19000 - - listeners: - - name: prometheus_listener - address: - socket_address: - address: 0.0.0.0 - port_value: 8081 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - codec_type: AUTO - stat_prefix: prometheus - route_config: - name: prometheus_route - virtual_hosts: - - name: prometheus_host - domains: - - "*" - routes: - - match: - prefix: "/metrics" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.routePrefixRewrite }} - cluster: admin_port_cluster - {{- if .Values.global.glooStats.enableStatsRoute}} - - match: - prefix: "/stats" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.statsPrefixRewrite }} - cluster: admin_port_cluster - {{- end }} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router -{{- if .Values.settings.integrations.knative.proxy.tracing }} - tracing: -{{ toYaml .Values.settings.integrations.knative.proxy.tracing | indent 22}} -{{- end}} {{/* if .Values.settings.integrations.knative.proxy.tracing */}} -{{- end}} - - dynamic_resources: - ads_config: - transport_api_version: V3 - api_type: GRPC - rate_limit_settings: {} - grpc_services: - - envoy_grpc: {cluster_name: xds_cluster} - cds_config: - resource_api_version: V3 - ads: {} - lds_config: - resource_api_version: V3 - ads: {} - admin: - access_log_path: /dev/null - address: - socket_address: - address: {{ .Values.settings.integrations.knative.proxy.loopBackAddress }} - port_value: 19000 -{{- end }} {{/* if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) */}} -{{- end }} {{/* .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/*define "knativeExternalProxy.configMapSpec"*/}} - -{{/* Render template with yaml 
overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.configMap -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.configMap.kubeResourceOverride -}} -{{- end }} {{/* if .Values.settings.integrations.knative.configMap */}} -{{- end }} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "knativeExternalProxy.configMapSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/28-knative-external-proxy-service.yaml b/install/helm/gloo/templates/28-knative-external-proxy-service.yaml deleted file mode 100644 index 9a2ef1b5888..00000000000 --- a/install/helm/gloo/templates/28-knative-external-proxy-service.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- define "knativeExternalProxy.serviceSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) }} -apiVersion: v1 -kind: Service -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: knative-external-proxy - name: knative-external-proxy - namespace: {{ .Release.Namespace }} -{{- if .Values.settings.integrations.knative.proxy.service }} -{{- if .Values.settings.integrations.knative.proxy.service.extraAnnotations }} - annotations: - {{- range $key, $value := .Values.ingressProxy.service.extraAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} -{{- end }} -{{- end }} -spec: - ports: - - port: {{ .Values.settings.integrations.knative.proxy.service.httpPort }} - targetPort: {{ .Values.settings.integrations.knative.proxy.httpPort }} - protocol: TCP - name: http - - port: {{ .Values.settings.integrations.knative.proxy.service.httpsPort }} - targetPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }} - protocol: TCP - name: https - selector: - gloo: knative-external-proxy - type: {{ .Values.settings.integrations.knative.proxy.service.type }} - {{- if and (eq .Values.settings.integrations.knative.proxy.service.type "LoadBalancer") .Values.settings.integrations.knative.proxy.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.settings.integrations.knative.proxy.service.loadBalancerIP }} - {{- end }} -{{- end }} {{/* if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) */}} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/*define "knativeExternalProxy.serviceSpec"*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.service -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.service.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . 
$kubeResourceOverride "knativeExternalProxy.serviceSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/29-knative-internal-proxy-deployment.yaml b/install/helm/gloo/templates/29-knative-internal-proxy-deployment.yaml deleted file mode 100644 index 19d408eb185..00000000000 --- a/install/helm/gloo/templates/29-knative-internal-proxy-deployment.yaml +++ /dev/null @@ -1,99 +0,0 @@ -{{- define "knativeInternalProxy.deploymentSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) }} -{{- $image := .Values.settings.integrations.knative.proxy.image }} -{{- if .Values.global }} -{{- $image = merge .Values.settings.integrations.knative.proxy.image .Values.global.image }} -{{- end }} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: knative-internal-proxy - name: knative-internal-proxy - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.settings.integrations.knative.proxy.replicas }} - selector: - matchLabels: - gloo: knative-internal-proxy - template: - metadata: - labels: - gloo: knative-internal-proxy - {{- if .Values.settings.integrations.knative.extraKnativeInternalLabels }} - {{- range $key, $value := .Values.settings.integrations.knative.extraKnativeInternalLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- if .Values.global.istioIntegration.disableAutoinjection }} - sidecar.istio.io/inject: "false" - {{- end }} - annotations: - checksum/knative-internal-proxy-config: {{ include (print .Template.BasePath "/30-knative-internal-proxy-configmap.yaml") . | sha256sum }} - {{- if .Values.settings.integrations.knative.extraKnativeInternalAnnotations }} - {{- range $key, $value := .Values.settings.integrations.knative.extraKnativeInternalAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- include "gloo.pullSecret" $image | nindent 6 -}} - {{- include "gloo.podSpecStandardFields" .Values.settings.integrations.knative.proxy | nindent 6 -}} - containers: - - args: ["--disable-hot-restart"] - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: {{template "gloo.image" $image}} - imagePullPolicy: {{ $image.pullPolicy }} - name: knative-internal-proxy -{{- if .Values.settings.integrations.knative.proxy.resources }} - resources: -{{ toYaml .Values.settings.integrations.knative.proxy.resources | indent 10}} -{{- end}} - {{- $capabilities := dict "drop" (list "ALL") "add" (list "NET_BIND_SERVICE") -}} - {{- $securityDefaults := dict - "readOnlyRootFilesystem" true - "allowPrivilegeEscalation" false - "capabilities" $capabilities - }} - {{- if .Values.settings.integrations.knative.proxy.runAsUser -}} - {{- $_ := set $securityDefaults "runAsUser" .Values.settings.integrations.knative.proxy.runAsUser }} - {{- end -}} - {{- include "gloo.containerSecurityContext" (dict "values" .Values.settings.integrations.knative.proxy.containerSecurityContext "defaults" $securityDefaults "podSecurityStandards" .Values.global.podSecurityStandards "indent" 8 "globalSec" .Values.global.securitySettings) }} - ports: - - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }} - name: http - protocol: TCP - - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }} - name: https - protocol: TCP - volumeMounts: - - mountPath: 
/etc/envoy - name: envoy-config - volumes: - - configMap: - name: knative-internal-proxy-config - name: envoy-config - -{{- end }} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/* define knativeInternalProxy.deploymentSpec */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.internal -}} -{{- if .Values.settings.integrations.knative.proxy.internal.deployment -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.internal.deployment.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "knativeInternalProxy.deploymentSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/30-knative-internal-proxy-configmap.yaml b/install/helm/gloo/templates/30-knative-internal-proxy-configmap.yaml deleted file mode 100644 index 16e0c462f85..00000000000 --- a/install/helm/gloo/templates/30-knative-internal-proxy-configmap.yaml +++ /dev/null @@ -1,158 +0,0 @@ -{{- define "knativeInternalProxy.configMapSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) }} - -# configmap -apiVersion: v1 -kind: ConfigMap -metadata: - name: knative-internal-proxy-config - namespace: {{ .Release.Namespace }} - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: knative-internal-proxy -data: - envoy.yaml: | - layered_runtime: - layers: - - name: static_layer - static_layer: - overload: - global_downstream_max_connections: 250000 - - name: admin_layer - admin_layer: {} - node: - cluster: knative - id: "{{ `{{.PodName}}.{{.PodNamespace}}` }}" - metadata: - # role's value is the key for the in-memory xds cache (projects/gloo/pkg/xds/envoy.go) - role: "{{ `{{.PodNamespace}}` }}~knative-internal-proxy" - static_resources: - clusters: - - name: xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ .Values.gloo.deployment.xdsPort }} - http2_protocol_options: {} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - - name: rest_xds_cluster - alt_stat_name: rest_xds_cluster - connect_timeout: 5.000s - load_assignment: - cluster_name: rest_xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: gloo - port_value: {{ $.Values.gloo.deployment.restXdsPort }} - upstream_connection_options: - tcp_keepalive: {} - type: STRICT_DNS - respect_dns_ttl: true -{{- if .Values.settings.integrations.knative.proxy.stats }} - - name: admin_port_cluster - connect_timeout: 5.000s - type: STATIC - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: admin_port_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 19000 - - listeners: - - name: prometheus_listener - address: - socket_address: - address: 0.0.0.0 - port_value: 8081 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - codec_type: AUTO - stat_prefix: prometheus - route_config: - name: prometheus_route - virtual_hosts: - - name: prometheus_host - domains: - - "*" - routes: - - 
match: - prefix: "/metrics" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.routePrefixRewrite }} - cluster: admin_port_cluster - {{- if .Values.global.glooStats.enableStatsRoute}} - - match: - prefix: "/stats" - headers: - - name: ":method" - exact_match: GET - route: - prefix_rewrite: {{ .Values.global.glooStats.statsPrefixRewrite }} - cluster: admin_port_cluster - {{- end }} - http_filters: - - name: envoy.filters.http.router - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router -{{- if .Values.settings.integrations.knative.proxy.tracing }} - tracing: -{{ toYaml .Values.settings.integrations.knative.proxy.tracing | indent 22}} -{{- end}} {{/* if .Values.settings.integrations.knative.proxy.tracing */}} -{{- end}} - - dynamic_resources: - ads_config: - transport_api_version: V3 - api_type: GRPC - rate_limit_settings: {} - grpc_services: - - envoy_grpc: {cluster_name: xds_cluster} - cds_config: - resource_api_version: V3 - ads: {} - lds_config: - resource_api_version: V3 - ads: {} - admin: - access_log_path: /dev/null - address: - socket_address: - address: {{ .Values.settings.integrations.knative.proxy.loopBackAddress }} - port_value: 19000 -{{- end }} {{/* if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) */}} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/*define "knativeInternalProxy.configMapSpec"*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.internal -}} -{{- if .Values.settings.integrations.knative.proxy.internal.configMap -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.internal.configMap.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "knativeInternalProxy.configMapSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/31-knative-internal-proxy-service.yaml b/install/helm/gloo/templates/31-knative-internal-proxy-service.yaml deleted file mode 100644 index bf2989da5ee..00000000000 --- a/install/helm/gloo/templates/31-knative-internal-proxy-service.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- define "knativeInternalProxy.serviceSpec"}} -{{- if .Values.settings.integrations.knative.enabled }} -{{- if (semverCompare ">= 0.8.0" .Values.settings.integrations.knative.version ) }} -apiVersion: v1 -kind: Service -metadata: - labels: -{{ include "gloo.labels" . 
| indent 4}} - gloo: knative-internal-proxy - name: knative-internal-proxy - namespace: {{ .Release.Namespace }} -spec: - ports: - - port: {{ .Values.settings.integrations.knative.proxy.service.httpPort }} - targetPort: {{ .Values.settings.integrations.knative.proxy.httpPort }} - protocol: TCP - name: http - - port: {{ .Values.settings.integrations.knative.proxy.service.httpsPort }} - targetPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }} - protocol: TCP - name: https - selector: - gloo: knative-internal-proxy - type: ClusterIP -{{- end }} -{{- end }} {{/* if .Values.settings.integrations.knative.enabled */}} -{{- end }} {{/*define "knativeInternalProxy.serviceSpec"*/}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.settings.integrations.knative.proxy -}} -{{- if .Values.settings.integrations.knative.proxy.internal -}} -{{- if .Values.settings.integrations.knative.proxy.internal.service -}} -{{- $kubeResourceOverride = .Values.settings.integrations.knative.proxy.internal.service.kubeResourceOverride -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "knativeInternalProxy.serviceSpec") -}} \ No newline at end of file diff --git a/projects/accesslogger/cmd/Dockerfile b/projects/accesslogger/cmd/Dockerfile deleted file mode 100644 index 86baf35ed55..00000000000 --- a/projects/accesslogger/cmd/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -ARG BASE_IMAGE - -FROM $BASE_IMAGE - -ARG GOARCH=amd64 -RUN apk -U upgrade && apk add ca-certificates && rm -rf /var/cache/apk/* -COPY access-logger-linux-$GOARCH /usr/local/bin/access-logger - -USER 10101 - -ENTRYPOINT ["/usr/local/bin/access-logger"] \ No newline at end of file diff --git a/projects/accesslogger/cmd/Dockerfile.distroless b/projects/accesslogger/cmd/Dockerfile.distroless deleted file mode 100644 index b05263b8712..00000000000 --- a/projects/accesslogger/cmd/Dockerfile.distroless +++ /dev/null @@ -1,10 +0,0 @@ -ARG BASE_IMAGE - -FROM $BASE_IMAGE -ARG GOARCH=amd64 - -COPY access-logger-linux-$GOARCH /usr/local/bin/access-logger - -USER 10101 - -ENTRYPOINT ["/usr/local/bin/access-logger"] \ No newline at end of file diff --git a/projects/accesslogger/cmd/main.go b/projects/accesslogger/cmd/main.go deleted file mode 100644 index 7952e04e977..00000000000 --- a/projects/accesslogger/cmd/main.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -import ( - "github.com/solo-io/gloo/projects/accesslogger/pkg/runner" - "github.com/solo-io/go-utils/stats" -) - -func main() { - stats.ConditionallyStartStatsServer() - runner.Run() -} diff --git a/projects/accesslogger/pkg/loggingservice/server.go b/projects/accesslogger/pkg/loggingservice/server.go deleted file mode 100644 index 6b83e376d67..00000000000 --- a/projects/accesslogger/pkg/loggingservice/server.go +++ /dev/null @@ -1,71 +0,0 @@ -package loggingservice - -import ( - "context" - - envoyals "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" - "github.com/solo-io/go-utils/contextutils" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -// server is used to implement envoyals.AccessLogServiceServer. 
- -type AlsCallback func(ctx context.Context, message *envoyals.StreamAccessLogsMessage) error -type AlsCallbackList []AlsCallback - -type Server struct { - opts *Options -} - -var _ envoyals.AccessLogServiceServer = new(Server) - -func (s *Server) StreamAccessLogs(srv envoyals.AccessLogService_StreamAccessLogsServer) error { - msg, err := srv.Recv() - if err != nil { - return err - } - - ctx := contextutils.WithLoggerValues( - s.opts.Ctx, - zap.String("logger_name", msg.GetIdentifier().GetLogName()), - zap.String("node_id", msg.GetIdentifier().GetNode().GetId()), - zap.String("node_cluster", msg.GetIdentifier().GetNode().GetCluster()), - zap.Any("node_locality", msg.GetIdentifier().GetNode().GetLocality()), - zap.Any("node_metadata", msg.GetIdentifier().GetNode().GetMetadata()), - ) - contextutils.LoggerFrom(ctx).Info("received access log message") - - if s.opts.Ordered { - for _, cb := range s.opts.Callbacks { - if err := cb(ctx, msg); err != nil { - return err - } - } - } else { - eg := errgroup.Group{} - for _, cb := range s.opts.Callbacks { - cb := cb - eg.Go(func() error { - return cb(ctx, msg) - }) - } - if err := eg.Wait(); err != nil { - return err - } - } - return nil -} - -type Options struct { - Ordered bool - Callbacks AlsCallbackList - Ctx context.Context -} - -func NewServer(opts Options) *Server { - if opts.Ctx == nil { - opts.Ctx = context.Background() - } - return &Server{opts: &opts} -} diff --git a/projects/accesslogger/pkg/runner/run.go b/projects/accesslogger/pkg/runner/run.go deleted file mode 100644 index a316adcdb16..00000000000 --- a/projects/accesslogger/pkg/runner/run.go +++ /dev/null @@ -1,269 +0,0 @@ -package runner - -import ( - "context" - "fmt" - "net" - - envoy_data_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" - pb "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" - _struct "github.com/golang/protobuf/ptypes/struct" - "github.com/solo-io/gloo/pkg/utils/statsutils" - "github.com/solo-io/gloo/projects/accesslogger/pkg/loggingservice" - "github.com/solo-io/gloo/projects/gloo/pkg/plugins/transformation" - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/healthchecker" - "github.com/solo-io/go-utils/stats" - "go.opencensus.io/plugin/ocgrpc" - ocstats "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" -) - -func init() { - view.Register(ocgrpc.DefaultServerViews...) - view.Register(accessLogsRequestsView, accessLogsDownstreamRespTimeView, accessLogsUpstreamRespTimeView) -} - -var ( - responseCodeKey, _ = tag.NewKey("response_code") - clusterKey, _ = tag.NewKey("cluster") - requestMethodKey, _ = tag.NewKey("request_method") - // add more keys here (and in the `utils.Measure()` calls) if you want additional dimensions/labels on the - // access logging metrics. take care to ensure the cardinality of the values of these keys is low enough that - // prometheus can handle the load. - tagKeys = []tag.Key{responseCodeKey, clusterKey, requestMethodKey} - - mAccessLogsRequests = ocstats.Int64("gloo.solo.io/accesslogging/requests", "The number of requests. Can be lossy.", ocstats.UnitDimensionless) - accessLogsRequestsView = &view.View{ - Name: "gloo.solo.io/accesslogging/requests", - Measure: mAccessLogsRequests, - Description: "The number of requests. 
Can be lossy.", - Aggregation: view.Count(), - TagKeys: tagKeys, - } - - mAccessLogsDownstreamRespTime = ocstats.Int64("gloo.solo.io/accesslogging/downstream_resp_time", "The downstream request time (ns). Can be lossy.", ocstats.UnitDimensionless) - accessLogsDownstreamRespTimeView = &view.View{ - Name: "gloo.solo.io/accesslogging/downstream_resp_time", - Measure: mAccessLogsDownstreamRespTime, - Description: "The downstream request time (ns). Can be lossy.", - Aggregation: view.Distribution(0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000, 60000, 300000, 600000, 1800000, 9000000, 45000000, 225000000, 1125000000, 3375000000), - TagKeys: tagKeys, - } - - mAccessLogsUpstreamRespTime = ocstats.Int64("gloo.solo.io/accesslogging/upstream_resp_time", "The upstream request time (ns). Can be lossy.", ocstats.UnitDimensionless) - accessLogsUpstreamRespTimeView = &view.View{ - Name: "gloo.solo.io/accesslogging/upstream_resp_time", - Measure: mAccessLogsUpstreamRespTime, - Description: "The upstream request time (ns). Can be lossy.", - Aggregation: view.Distribution(0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000, 60000, 300000, 600000, 1800000, 9000000, 45000000, 225000000, 1125000000, 3375000000), - TagKeys: tagKeys, - } -) - -func Run() { - clientSettings := NewSettings() - ctx := contextutils.WithLogger(context.Background(), "access_log") - - if clientSettings.DebugPort != 0 { - // TODO(yuval-k): we need to start the stats server before calling contextutils - // need to think of a better way to express this dependency, or preferably, fix it. - stats.StartStatsServerWithPort(stats.StartupOptions{Port: clientSettings.DebugPort}) - } - - opts := loggingservice.Options{ - Callbacks: loggingservice.AlsCallbackList{ - func(ctx context.Context, message *pb.StreamAccessLogsMessage) error { - logger := contextutils.LoggerFrom(ctx) - switch msg := message.GetLogEntries().(type) { - case *pb.StreamAccessLogsMessage_HttpLogs: - for _, v := range msg.HttpLogs.GetLogEntry() { - - meta := v.GetCommonProperties().GetMetadata().GetFilterMetadata() - // we could put any other kind of data into the transformation metadata, including more - // detailed request info or info that gets dropped once translated into envoy config. For - // example, virtual service name, virtual service namespace, virtual service base path, - // virtual service route (operation path), the request/response body, etc. - // - // transformations can live at the virtual host, route, and weighted destination level on the - // `Proxy`, so users can add very granular information to the transformation filter metadata by - // configuring transformations on VirtualServices, RouteTables, and/or UpstreamGroups. 
- // - // follow the guide here to create requests with the proper transformation to populate 'pod_name' in the access logs: - // https://docs.solo.io/gloo-edge/latest/guides/traffic_management/request_processing/transformations/enrich_access_logs/#update-virtual-service - podName := getTransformationValueFromDynamicMetadata("pod_name", meta) - - // we could change the claim to any other jwt claim, such as client_id - // - // follow the guide here to create requests with a jwt that has the 'iss' claim, to populate issuer in the access logs: - // https://docs.solo.io/gloo-edge/latest/guides/security/auth/jwt/access_control/#appendix---use-a-remote-json-web-key-set-jwks-server - issuer := getClaimFromJwtInDynamicMetadata("iss", meta) - - statsutils.MeasureOne( - ctx, - mAccessLogsRequests, - tag.Insert(responseCodeKey, v.GetResponse().GetResponseCode().String()), - tag.Insert(clusterKey, v.GetCommonProperties().GetUpstreamCluster()), - tag.Insert(requestMethodKey, v.GetRequest().GetRequestMethod().String())) - - // this includes the time filters take during the processing of the request and response. - downstreamRespTime := v.GetCommonProperties().GetTimeToLastDownstreamTxByte() - downstreamRespTimeNs := int64(downstreamRespTime.GetNanos()) + (downstreamRespTime.GetSeconds()*1 ^ 9) - - // if envoy is buffering the request before sending upstream, you want the following - upstreamRespTimeNs := lastToFirstNs(v) - // otherwise, you want this - // upstreamRespTimeNs := firstToFirstNs(v) - - statsutils.Measure( - ctx, - mAccessLogsDownstreamRespTime, - downstreamRespTimeNs, - tag.Insert(responseCodeKey, v.GetResponse().GetResponseCode().String()), - tag.Insert(clusterKey, v.GetCommonProperties().GetUpstreamCluster()), - tag.Insert(requestMethodKey, v.GetRequest().GetRequestMethod().String())) - - statsutils.Measure( - ctx, - mAccessLogsUpstreamRespTime, - upstreamRespTimeNs, - tag.Insert(responseCodeKey, v.GetResponse().GetResponseCode().String()), - tag.Insert(clusterKey, v.GetCommonProperties().GetUpstreamCluster()), - tag.Insert(requestMethodKey, v.GetRequest().GetRequestMethod().String())) - - logger.With( - zap.Any("protocol_version", v.GetProtocolVersion()), - zap.Any("request_path", v.GetRequest().GetPath()), - zap.Any("request_original_path", v.GetRequest().GetOriginalPath()), - zap.Any("request_method", v.GetRequest().GetRequestMethod().String()), - zap.Any("request_headers", v.GetRequest().GetRequestHeaders()), - zap.Any("response_code", v.GetResponse().GetResponseCode().String()), - zap.Any("response_headers", v.GetResponse().GetResponseHeaders()), - zap.Any("response_trailers", v.GetResponse().GetResponseTrailers()), - zap.Any("cluster", v.GetCommonProperties().GetUpstreamCluster()), - zap.Any("upstream_remote_address", v.GetCommonProperties().GetUpstreamRemoteAddress()), - zap.Any("issuer", issuer), // requires jwt set up and jwt with 'iss' claim to be non-empty - zap.Any("pod_name", podName), // requires transformation set up with dynamic metadata (with 'pod_name' key) to be non-empty - zap.Any("route_name", v.GetCommonProperties().GetRouteName()), // empty by default, but name can be set on routes in virtual services or route tables - zap.Any("start_time", v.GetCommonProperties().GetStartTime()), - zap.Any("downstream_resp_time", downstreamRespTimeNs), - zap.Any("upstream_resp_time", upstreamRespTimeNs), - ).Info("received http request") - } - case *pb.StreamAccessLogsMessage_TcpLogs: - for _, v := range msg.TcpLogs.GetLogEntry() { - logger.With( - zap.Any("upstream_cluster", 
v.GetCommonProperties().GetUpstreamCluster()), - zap.Any("route_name", v.GetCommonProperties().GetRouteName()), - ).Info("received tcp request") - } - } - return nil - }, - }, - Ctx: ctx, - } - service := loggingservice.NewServer(opts) - - err := RunWithSettings(ctx, service, clientSettings) - - if err != nil { - if ctx.Err() == nil { - // not a context error - panic - panic(err) - } - } -} - -func RunWithSettings(ctx context.Context, service *loggingservice.Server, clientSettings Settings) error { - err := StartAccessLog(ctx, clientSettings, service) - if ctx.Err() != nil { - return ctx.Err() - } - return err -} - -func StartAccessLog(ctx context.Context, clientSettings Settings, service *loggingservice.Server) error { - srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})) - - pb.RegisterAccessLogServiceServer(srv, service) - hc := healthchecker.NewGrpc(clientSettings.ServiceName, health.NewServer(), false, healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(srv, hc.GetServer()) - reflection.Register(srv) - - logger := contextutils.LoggerFrom(ctx) - logger.Infow("Starting access-log server") - - addr := fmt.Sprintf(":%d", clientSettings.ServerPort) - runMode := "gRPC" - network := "tcp" - - logger.Infof("access-log server running in [%s] mode, listening at [%s]", runMode, addr) - lis, err := net.Listen(network, addr) - if err != nil { - logger.Errorw("Failed to announce on network", zap.Any("mode", runMode), zap.Any("address", addr), zap.Any("error", err)) - return err - } - go func() { - <-ctx.Done() - srv.Stop() - _ = lis.Close() - }() - - return srv.Serve(lis) -} - -func getTransformationValueFromDynamicMetadata(key string, filterMetadata map[string]*_struct.Struct) string { - transformationMeta := filterMetadata[transformation.FilterName] - for tKey, tVal := range transformationMeta.GetFields() { - if tKey == key { - return tVal.GetStringValue() - } - } - return "" -} - -func getClaimFromJwtInDynamicMetadata(claim string, filterMetadata map[string]*_struct.Struct) string { - providerByJwt := filterMetadata["envoy.filters.http.jwt_authn"] - jwts := providerByJwt.GetFields() - for _, jwt := range jwts { - claims := jwt.GetStructValue() - if claims != nil { - claimsMap := claims.GetFields() - if val, ok := claimsMap[claim]; ok { - return val.GetStringValue() - } - } - } - return "" -} - -func firstToFirstNs(entry *envoy_data_accesslog_v3.HTTPAccessLogEntry) int64 { - timeToFirstUpstreamRxByte := entry.GetCommonProperties().GetTimeToFirstUpstreamRxByte() - timeToFirstUpstreamRxByteNs := int64(timeToFirstUpstreamRxByte.GetNanos()) + (timeToFirstUpstreamRxByte.GetSeconds()*1 ^ 9) - timeToFirstUpstreamTxByte := entry.GetCommonProperties().GetTimeToFirstUpstreamTxByte() - timeToFirstUpstreamTxByteNs := int64(timeToFirstUpstreamTxByte.GetNanos()) + (timeToFirstUpstreamTxByte.GetSeconds()*1 ^ 9) - - // this excludes the time filters take during the processing of the request and response. 
- upstreamRespTimeNs := timeToFirstUpstreamRxByteNs - timeToFirstUpstreamTxByteNs - return upstreamRespTimeNs -} - -func lastToFirstNs(entry *envoy_data_accesslog_v3.HTTPAccessLogEntry) int64 { - timeToFirstUpstreamRxByte := entry.GetCommonProperties().GetTimeToFirstUpstreamRxByte() - timeToFirstUpstreamRxByteNs := int64(timeToFirstUpstreamRxByte.GetNanos()) + (timeToFirstUpstreamRxByte.GetSeconds()*1 ^ 9) - timeToLastUpstreamTxByte := entry.GetCommonProperties().GetTimeToLastUpstreamTxByte() - timeToLastUpstreamTxByteNs := int64(timeToLastUpstreamTxByte.GetNanos()) + (timeToLastUpstreamTxByte.GetSeconds()*1 ^ 9) - - // this excludes the time filters take during the processing of the request and response. - // this could, in theory, be negative. for example, the upstream could reject based on the - // request headers and respond before the request body had finished transmitting upstream. - upstreamRespTimeNs := timeToFirstUpstreamRxByteNs - timeToLastUpstreamTxByteNs - return upstreamRespTimeNs -} diff --git a/projects/accesslogger/pkg/runner/settings.go b/projects/accesslogger/pkg/runner/settings.go deleted file mode 100644 index 85ad32d96bd..00000000000 --- a/projects/accesslogger/pkg/runner/settings.go +++ /dev/null @@ -1,22 +0,0 @@ -package runner - -import ( - "github.com/kelseyhightower/envconfig" -) - -type Settings struct { - DebugPort int `envconfig:"DEBUG_PORT" default:"9091"` - ServerPort int `envconfig:"SERVER_PORT" default:"8083"` - ServiceName string `envconfig:"SERVICE_NAME" default:"AccessLog"` -} - -func NewSettings() Settings { - var s Settings - - err := envconfig.Process("", &s) - if err != nil { - panic(err) - } - - return s -} diff --git a/projects/clusteringress/README.md b/projects/clusteringress/README.md deleted file mode 100644 index 30590d824bc..00000000000 --- a/projects/clusteringress/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# Knative Networking with Gloo Edge Cluster Ingress - -With Knative support enabled, Gloo Edge will configure Envoy using [Knative's Cluster Ingress Resource](https://github.com/knative/serving/blob/main/pkg/client/informers/externalversions/networking/v1alpha1/clusteringress.go). - -The installation process detailed in this document provides a way of using Knative-Serving without needing to install Istio. - -### What you'll need - -1. Kubernetes v1.11.3. We recommend using [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) or -[Kubernetes-in-Docker](https://github.com/kubernetes-sigs/kind) to get a local cluster up quickly. -1. [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed on your local machine. - -### Install - -#### 1. Install glooctl - -If this is your first time running Gloo Edge, you’ll need to download the command-line interface (CLI) onto your local machine. -You’ll use this CLI to interact with Gloo Edge, including installing it onto your Kubernetes cluster. - -To install the CLI, run: - -##### Linux/MacOS - -`curl -sL https://run.solo.io/gloo/install | sh` - -##### Windows - -`(New-Object System.Net.WebClient).DownloadString("https://run.solo.io/gloo/windows/install") | iex` - -Alternatively, you can download the CLI directly via the github releases page. - -Next, add Gloo Edge to your path with: - -##### Linux/MacOS - -`export PATH=$HOME/.gloo/bin:$PATH` - -##### Windows - -`$env:Path += ";$env:userprofile/.gloo/bin/"` - -Verify the CLI is installed and running correctly with: - -`glooctl version` - -#### 2. 
Install Knative and Gloo Edge to your Kubernetes Cluster using glooctl - -Once your Kubernetes cluster is up and running, run the following command to deploy Knative-Serving components to the `knative-serving` namespace and Gloo Edge to the `gloo-system` namespace: - -`glooctl install knative` - - -Check that the Gloo Edge and Knative pods and services have been created: - -```bash -kubectl get all -n gloo-system - -NAME READY STATUS RESTARTS AGE -pod/clusteringress-proxy-65485cd8f4-gg9qq 1/1 Running 0 10m -pod/discovery-5cf7c45fb7-ndj29 1/1 Running 0 10m -pod/gateway-7b48fdfbd8-trwvg 1/1 Running 1 10m -pod/gateway-proxy-984bcf497-29jl8 1/1 Running 0 10m -pod/gloo-5fc9f5c558-n6nlr 1/1 Running 1 10m -pod/ingress-6d8d8f595c-smql8 1/1 Running 0 10m -pod/ingress-proxy-5fc45b8f6d-cckw4 1/1 Running 0 10m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/clusteringress-proxy LoadBalancer 10.96.196.217 80:31639/TCP,443:31025/TCP 14m -service/gateway-proxy LoadBalancer 10.109.135.176 8080:32722/TCP 14m -service/gloo ClusterIP 10.103.179.64 9977/TCP 14m -service/ingress-proxy LoadBalancer 10.110.100.99 80:31738/TCP,443:31769/TCP 14m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/clusteringress-proxy 1 1 1 1 14m -deployment.apps/discovery 1 1 1 1 14m -deployment.apps/gateway 1 1 1 1 14m -deployment.apps/gateway-proxy 1 1 1 1 14m -deployment.apps/gloo 1 1 1 1 14m -deployment.apps/ingress 1 1 1 1 14m -deployment.apps/ingress-proxy 1 1 1 1 14m - - -``` - -```bash -kubectl get all -n knative-serving - -NAME READY STATUS RESTARTS AGE -pod/activator-5c4755585c-5wv26 1/1 Running 0 15m -pod/autoscaler-78cd88f869-dvsfr 1/1 Running 0 15m -pod/controller-8d5b85958-tcqn5 1/1 Running 0 15m -pod/webhook-7585d7488c-zk9wz 1/1 Running 0 15m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/activator-service ClusterIP 10.109.189.12 80/TCP,9090/TCP 15m -service/autoscaler ClusterIP 10.98.6.4 8080/TCP,9090/TCP 15m -service/controller ClusterIP 10.108.42.33 9090/TCP 15m -service/webhook ClusterIP 10.99.201.163 443/TCP 15m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/activator 1 1 1 1 15m -deployment.apps/autoscaler 1 1 1 1 15m -deployment.apps/controller 1 1 1 1 15m -deployment.apps/webhook 1 1 1 1 15m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/activator-5c4755585c 1 1 1 15m -replicaset.apps/autoscaler-78cd88f869 1 1 1 15m -replicaset.apps/controller-8d5b85958 1 1 1 15m -replicaset.apps/webhook-7585d7488c 1 1 1 15m - -NAME AGE -image.caching.internal.knative.dev/fluentd-sidecar 15m -image.caching.internal.knative.dev/queue-proxy 15m -``` - -#### 3. Send Requests to a Knative App - -Create a Knative App: - -```bash -# deploy a basic helloworld-go service -kubectl apply -f https://raw.githubusercontent.com/solo-io/gloo/main/test/kube2e/artifacts/knative-hello-service.yaml -``` - -Get the URL of the Gloo Edge Knative Ingress: - -```bash -export INGRESS=$(glooctl proxy url --name clusteringress-proxy) -echo $INGRESS - -http://172.17.0.2:31345 -``` - -Note: if your cluster is running in minishift, you'll need to run the following command to get an externally accessible -url: - -```bash -export INGRESS=$(glooctl proxy url --name clusteringress-proxy --local-cluster) -echo $INGRESS - -http://192.168.99.163:32220 - -``` - -Send a request to the app using `curl`: - -```bash -curl -H "Host: helloworld-go.default.example.com" $INGRESS - -Hello Go Sample v1! -``` - -Everything should be up and running. 
If this process does not work, please [open an issue](https://github.com/solo-io/gloo/issues/new). We are happy to answer -questions on our [diligently staffed Slack channel](https://slack.solo.io/). - - -### Uninstall - -To tear down the installation at any point, you can simply run - -```bash - -kubectl delete namespace gloo-system -kubectl delete namespace knative-serving -``` - diff --git a/projects/clusteringress/api/external/knative/cluster_ingress.go b/projects/clusteringress/api/external/knative/cluster_ingress.go deleted file mode 100644 index 241c6342de0..00000000000 --- a/projects/clusteringress/api/external/knative/cluster_ingress.go +++ /dev/null @@ -1,30 +0,0 @@ -package knative - -import ( - "reflect" - - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "github.com/solo-io/solo-kit/pkg/utils/kubeutils" - "knative.dev/networking/pkg/apis/networking/v1alpha1" -) - -type ClusterIngress v1alpha1.Ingress - -func (p *ClusterIngress) GetMetadata() *core.Metadata { - return kubeutils.FromKubeMeta(p.ObjectMeta, true) -} - -func (p *ClusterIngress) SetMetadata(meta *core.Metadata) { - p.ObjectMeta = kubeutils.ToKubeMeta(meta) -} - -func (p *ClusterIngress) Equal(that interface{}) bool { - return reflect.DeepEqual(p, that) -} - -func (p *ClusterIngress) Clone() *ClusterIngress { - ci := v1alpha1.Ingress(*p) - ciCopy := ci.DeepCopy() - newCi := ClusterIngress(*ciCopy) - return &newCi -} diff --git a/projects/clusteringress/api/external/knative/solo-kit.json b/projects/clusteringress/api/external/knative/solo-kit.json deleted file mode 100644 index cfe4e5974d3..00000000000 --- a/projects/clusteringress/api/external/knative/solo-kit.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "networking.internal.knative.dev", - "version": "v1alpha1", - "custom_resources": [ - { - "package": "github.com/solo-io/gloo/projects/clusteringress/api/external/knative", - "type": "ClusterIngress", - "plural_name": "clusteringresses", - "short_name": "ci" - } - ], - "go_package": "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" -} diff --git a/projects/clusteringress/api/v1/solo-kit.json b/projects/clusteringress/api/v1/solo-kit.json deleted file mode 100644 index 32fcc856889..00000000000 --- a/projects/clusteringress/api/v1/solo-kit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "name": "clusteringress.gloo.solo.io", - "version": "v1", - "go_package": "github.com/solo-io/gloo/projects/clusteringress/pkg/api/v1", - "imports": [ - "github.com/solo-io/gloo/projects/clusteringress/api/external/knative" - ], - "resource_groups": { - "translator.clusteringress.gloo.solo.io": [ - { - "name": "ClusterIngress", - "package": "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - } - ] - } -} diff --git a/projects/clusteringress/pkg/api/custom/knative/cache.go b/projects/clusteringress/pkg/api/custom/knative/cache.go deleted file mode 100644 index a27a2e438d5..00000000000 --- a/projects/clusteringress/pkg/api/custom/knative/cache.go +++ /dev/null @@ -1,84 +0,0 @@ -package knative - -import ( - "context" - "sync" - "time" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/controller" - knativeclient "knative.dev/networking/pkg/client/clientset/versioned" - knativeinformers "knative.dev/networking/pkg/client/informers/externalversions" - knativelisters "knative.dev/networking/pkg/client/listers/networking/v1alpha1" -) - -type Cache interface { - ClusterIngressLister() knativelisters.IngressLister - Subscribe() <-chan struct{} - Unsubscribe(<-chan struct{}) -} - 
-type knativeCache struct { - clusterIngress knativelisters.IngressLister - - cacheUpdatedWatchers []chan struct{} - cacheUpdatedWatchersMutex sync.Mutex -} - -// This context should live as long as the cache is desired. i.e. if the cache is shared -// across clients, it should get a context that has a longer lifetime than the clients themselves -func NewClusterIngreessCache(ctx context.Context, knativeClient knativeclient.Interface) (*knativeCache, error) { - resyncDuration := 12 * time.Hour - sharedInformerFactory := knativeinformers.NewSharedInformerFactory(knativeClient, resyncDuration) - - clusterIngress := sharedInformerFactory.Networking().V1alpha1().Ingresses() - - k := &knativeCache{ - clusterIngress: clusterIngress.Lister(), - } - - kubeController := controller.NewController("knative-resources-cache", - controller.NewLockingSyncHandler(k.updatedOccurred), - clusterIngress.Informer()) - - stop := ctx.Done() - err := kubeController.Run(2, stop) - if err != nil { - return nil, err - } - - return k, nil -} - -func (k *knativeCache) ClusterIngressLister() knativelisters.IngressLister { - return k.clusterIngress -} - -func (k *knativeCache) Subscribe() <-chan struct{} { - k.cacheUpdatedWatchersMutex.Lock() - defer k.cacheUpdatedWatchersMutex.Unlock() - c := make(chan struct{}, 10) - k.cacheUpdatedWatchers = append(k.cacheUpdatedWatchers, c) - return c -} - -func (k *knativeCache) Unsubscribe(c <-chan struct{}) { - k.cacheUpdatedWatchersMutex.Lock() - defer k.cacheUpdatedWatchersMutex.Unlock() - for i, cacheUpdated := range k.cacheUpdatedWatchers { - if cacheUpdated == c { - k.cacheUpdatedWatchers = append(k.cacheUpdatedWatchers[:i], k.cacheUpdatedWatchers[i+1:]...) - return - } - } -} - -func (k *knativeCache) updatedOccurred() { - k.cacheUpdatedWatchersMutex.Lock() - defer k.cacheUpdatedWatchersMutex.Unlock() - for _, cacheUpdated := range k.cacheUpdatedWatchers { - select { - case cacheUpdated <- struct{}{}: - default: - } - } -} diff --git a/projects/clusteringress/pkg/api/custom/knative/knative_clusteringress_client.go b/projects/clusteringress/pkg/api/custom/knative/knative_clusteringress_client.go deleted file mode 100644 index 5c63c044f4a..00000000000 --- a/projects/clusteringress/pkg/api/custom/knative/knative_clusteringress_client.go +++ /dev/null @@ -1,165 +0,0 @@ -package knative - -import ( - "context" - "fmt" - "sort" - - "github.com/solo-io/go-utils/contextutils" - - "github.com/solo-io/gloo/projects/clusteringress/api/external/knative" - v1alpha1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" - knativeclient "knative.dev/networking/pkg/client/clientset/versioned" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" -) - -type ResourceClient struct { - knativeClient knativeclient.Interface - cache Cache -} - -func NewResourceClient(knativeClient knativeclient.Interface, cache Cache) *ResourceClient { - return &ResourceClient{ - knativeClient: knativeClient, - cache: cache, - } -} - -func FromKube(ci *knativev1alpha1.Ingress) *v1alpha1.ClusterIngress { - deepCopy := ci.DeepCopy() - baseType := knative.ClusterIngress(*deepCopy) - resource := &v1alpha1.ClusterIngress{ - ClusterIngress: baseType, - } - - return resource -} - -func ToKube(resource resources.Resource) (*knativev1alpha1.Ingress, error) { 
- clusterIngressResource, ok := resource.(*v1alpha1.ClusterIngress) - if !ok { - return nil, errors.Errorf("internal error: invalid resource %v passed to clusteringress client", resources.Kind(resource)) - } - - clusterIngress := knativev1alpha1.Ingress(clusterIngressResource.ClusterIngress) - - return &clusterIngress, nil -} - -var _ clients.ResourceClient = &ResourceClient{} - -func (rc *ResourceClient) Kind() string { - return resources.Kind(&v1alpha1.ClusterIngress{}) -} - -func (rc *ResourceClient) NewResource() resources.Resource { - return resources.Clone(&v1alpha1.ClusterIngress{}) -} - -func (rc *ResourceClient) Register() error { - return nil -} - -func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support read operations") - return nil, fmt.Errorf("this client does not support read operations") -} - -func (rc *ResourceClient) Write(resource resources.Resource, opts clients.WriteOpts) (resources.Resource, error) { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support write operations") - return nil, fmt.Errorf("this client does not support write operations") -} - -func (rc *ResourceClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support delete operations") - return fmt.Errorf("this client does not support delete operations") -} - -func (rc *ResourceClient) ApplyStatus(statusClient resources.StatusClient, inputResource resources.InputResource, opts clients.ApplyStatusOpts) (resources.Resource, error) { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support apply status operations") - return nil, fmt.Errorf("this client does not support apply status operations") -} - -func (rc *ResourceClient) List(_ string, opts clients.ListOpts) (resources.ResourceList, error) { - opts = opts.WithDefaults() - - clusterIngressObjList, err := rc.cache.ClusterIngressLister().List(labels.SelectorFromSet(opts.Selector)) - if err != nil { - return nil, errors.Wrapf(err, "listing ClusterIngresses") - } - var resourceList resources.ResourceList - for _, ClusterIngressObj := range clusterIngressObjList { - resource := FromKube(ClusterIngressObj) - - if resource == nil { - continue - } - resourceList = append(resourceList, resource) - } - - sort.SliceStable(resourceList, func(i, j int) bool { - return resourceList[i].GetMetadata().GetName() < resourceList[j].GetMetadata().GetName() - }) - - return resourceList, nil -} - -func (rc *ResourceClient) Watch(_ string, opts clients.WatchOpts) (<-chan resources.ResourceList, <-chan error, error) { - opts = opts.WithDefaults() - watch := rc.cache.Subscribe() - - resourcesChan := make(chan resources.ResourceList) - errs := make(chan error) - // prevent flooding the channel with duplicates - var previous *resources.ResourceList - updateResourceList := func() { - list, err := rc.List("", clients.ListOpts{ - Ctx: opts.Ctx, - Selector: opts.Selector, - }) - if err != nil { - errs <- err - return - } - if previous != nil { - if list.Equal(*previous) { - return - } - } - previous = &list - resourcesChan <- list - } - - go func() { - defer rc.cache.Unsubscribe(watch) - defer close(resourcesChan) - defer close(errs) - - // watch should open up with an initial read - updateResourceList() - for { - select { - case _, ok := <-watch: - if !ok { - return - } - updateResourceList() - case 
<-opts.Ctx.Done(): - return - } - } - }() - - return resourcesChan, errs, nil -} - -func (rc *ResourceClient) exist(ctx context.Context, namespace, name string) bool { - _, err := rc.knativeClient.NetworkingV1alpha1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) - return err == nil -} diff --git a/projects/clusteringress/pkg/api/external/knative/cluster_ingress.sk.go b/projects/clusteringress/pkg/api/external/knative/cluster_ingress.sk.go deleted file mode 100644 index d604d000182..00000000000 --- a/projects/clusteringress/pkg/api/external/knative/cluster_ingress.sk.go +++ /dev/null @@ -1,159 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1alpha1 - -import ( - "encoding/binary" - "hash" - "hash/fnv" - "log" - "sort" - - github_com_solo_io_gloo_projects_clusteringress_api_external_knative "github.com/solo-io/gloo/projects/clusteringress/api/external/knative" - - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "github.com/solo-io/solo-kit/pkg/errors" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // Compile-time assertion - _ resources.Resource = new(ClusterIngress) -) - -func NewClusterIngressHashableResource() resources.HashableResource { - return new(ClusterIngress) -} - -func NewClusterIngress(namespace, name string) *ClusterIngress { - clusteringress := &ClusterIngress{} - clusteringress.ClusterIngress.SetMetadata(&core.Metadata{ - Name: name, - Namespace: namespace, - }) - return clusteringress -} - -// require custom resource to implement Clone() as well as resources.Resource interface - -type CloneableClusterIngress interface { - resources.Resource - Clone() *github_com_solo_io_gloo_projects_clusteringress_api_external_knative.ClusterIngress -} - -var _ CloneableClusterIngress = &github_com_solo_io_gloo_projects_clusteringress_api_external_knative.ClusterIngress{} - -type ClusterIngress struct { - github_com_solo_io_gloo_projects_clusteringress_api_external_knative.ClusterIngress -} - -func (r *ClusterIngress) Clone() resources.Resource { - return &ClusterIngress{ClusterIngress: *r.ClusterIngress.Clone()} -} - -func (r *ClusterIngress) Hash(hasher hash.Hash64) (uint64, error) { - if hasher == nil { - hasher = fnv.New64() - } - clone := r.ClusterIngress.Clone() - resources.UpdateMetadata(clone, func(meta *core.Metadata) { - meta.ResourceVersion = "" - }) - err := binary.Write(hasher, binary.LittleEndian, hashutils.HashAll(clone)) - if err != nil { - return 0, err - } - return hasher.Sum64(), nil -} - -func (r *ClusterIngress) MustHash() uint64 { - hashVal, err := r.Hash(nil) - if err != nil { - log.Panicf("error while hashing: (%s) this should never happen", err) - } - return hashVal -} - -func (r *ClusterIngress) GroupVersionKind() schema.GroupVersionKind { - return ClusterIngressGVK -} - -type ClusterIngressList []*ClusterIngress - -func (list ClusterIngressList) Find(namespace, name string) (*ClusterIngress, error) { - for _, clusterIngress := range list { - if clusterIngress.GetMetadata().Name == name && clusterIngress.GetMetadata().Namespace == namespace { - return clusterIngress, nil - } - } - return nil, errors.Errorf("list did not find clusterIngress %v.%v", namespace, name) -} - -func (list ClusterIngressList) AsResources() resources.ResourceList { - var ress resources.ResourceList - for _, clusterIngress := range list { - ress = append(ress, clusterIngress) - } - return ress -} - -func (list ClusterIngressList) Names() []string { - 
var names []string - for _, clusterIngress := range list { - names = append(names, clusterIngress.GetMetadata().Name) - } - return names -} - -func (list ClusterIngressList) NamespacesDotNames() []string { - var names []string - for _, clusterIngress := range list { - names = append(names, clusterIngress.GetMetadata().Namespace+"."+clusterIngress.GetMetadata().Name) - } - return names -} - -func (list ClusterIngressList) Sort() ClusterIngressList { - sort.SliceStable(list, func(i, j int) bool { - return list[i].GetMetadata().Less(list[j].GetMetadata()) - }) - return list -} - -func (list ClusterIngressList) Clone() ClusterIngressList { - var clusterIngressList ClusterIngressList - for _, clusterIngress := range list { - clusterIngressList = append(clusterIngressList, resources.Clone(clusterIngress).(*ClusterIngress)) - } - return clusterIngressList -} - -func (list ClusterIngressList) Each(f func(element *ClusterIngress)) { - for _, clusterIngress := range list { - f(clusterIngress) - } -} - -func (list ClusterIngressList) EachResource(f func(element resources.Resource)) { - for _, clusterIngress := range list { - f(clusterIngress) - } -} - -func (list ClusterIngressList) AsInterfaces() []interface{} { - var asInterfaces []interface{} - list.Each(func(element *ClusterIngress) { - asInterfaces = append(asInterfaces, element) - }) - return asInterfaces -} - -var ( - ClusterIngressGVK = schema.GroupVersionKind{ - Version: "v1alpha1", - Group: "networking.internal.knative.dev", - Kind: "ClusterIngress", - } -) diff --git a/projects/clusteringress/pkg/api/external/knative/cluster_ingress_client.sk.go b/projects/clusteringress/pkg/api/external/knative/cluster_ingress_client.sk.go deleted file mode 100644 index 7a58bf8d57a..00000000000 --- a/projects/clusteringress/pkg/api/external/knative/cluster_ingress_client.sk.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" -) - -type ClusterIngressWatcher interface { - // watch namespace-scoped clusteringresses - Watch(namespace string, opts clients.WatchOpts) (<-chan ClusterIngressList, <-chan error, error) -} - -type ClusterIngressClient interface { - BaseClient() clients.ResourceClient - Register() error - Read(namespace, name string, opts clients.ReadOpts) (*ClusterIngress, error) - Write(resource *ClusterIngress, opts clients.WriteOpts) (*ClusterIngress, error) - Delete(namespace, name string, opts clients.DeleteOpts) error - List(namespace string, opts clients.ListOpts) (ClusterIngressList, error) - ClusterIngressWatcher -} - -type clusterIngressClient struct { - rc clients.ResourceClient -} - -func NewClusterIngressClient(ctx context.Context, rcFactory factory.ResourceClientFactory) (ClusterIngressClient, error) { - return NewClusterIngressClientWithToken(ctx, rcFactory, "") -} - -func NewClusterIngressClientWithToken(ctx context.Context, rcFactory factory.ResourceClientFactory, token string) (ClusterIngressClient, error) { - rc, err := rcFactory.NewResourceClient(ctx, factory.NewResourceClientParams{ - ResourceType: &ClusterIngress{}, - Token: token, - }) - if err != nil { - return nil, errors.Wrapf(err, "creating base ClusterIngress resource client") - } - return NewClusterIngressClientWithBase(rc), nil -} - -func NewClusterIngressClientWithBase(rc clients.ResourceClient) ClusterIngressClient { - return &clusterIngressClient{ - rc: rc, - } -} - -func (client *clusterIngressClient) BaseClient() clients.ResourceClient { - return client.rc -} - -func (client *clusterIngressClient) Register() error { - return client.rc.Register() -} - -func (client *clusterIngressClient) Read(namespace, name string, opts clients.ReadOpts) (*ClusterIngress, error) { - opts = opts.WithDefaults() - - resource, err := client.rc.Read(namespace, name, opts) - if err != nil { - return nil, err - } - return resource.(*ClusterIngress), nil -} - -func (client *clusterIngressClient) Write(clusterIngress *ClusterIngress, opts clients.WriteOpts) (*ClusterIngress, error) { - opts = opts.WithDefaults() - resource, err := client.rc.Write(clusterIngress, opts) - if err != nil { - return nil, err - } - return resource.(*ClusterIngress), nil -} - -func (client *clusterIngressClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - opts = opts.WithDefaults() - - return client.rc.Delete(namespace, name, opts) -} - -func (client *clusterIngressClient) List(namespace string, opts clients.ListOpts) (ClusterIngressList, error) { - opts = opts.WithDefaults() - - resourceList, err := client.rc.List(namespace, opts) - if err != nil { - return nil, err - } - return convertToClusterIngress(resourceList), nil -} - -func (client *clusterIngressClient) Watch(namespace string, opts clients.WatchOpts) (<-chan ClusterIngressList, <-chan error, error) { - opts = opts.WithDefaults() - - resourcesChan, errs, initErr := client.rc.Watch(namespace, opts) - if initErr != nil { - return nil, nil, initErr - } - clusteringressesChan := make(chan ClusterIngressList) - go func() { - for { - select { - case resourceList := <-resourcesChan: - select { - case clusteringressesChan <- convertToClusterIngress(resourceList): - case <-opts.Ctx.Done(): - close(clusteringressesChan) - return - } - case 
<-opts.Ctx.Done(): - close(clusteringressesChan) - return - } - } - }() - return clusteringressesChan, errs, nil -} - -func convertToClusterIngress(resources resources.ResourceList) ClusterIngressList { - var clusterIngressList ClusterIngressList - for _, resource := range resources { - clusterIngressList = append(clusterIngressList, resource.(*ClusterIngress)) - } - return clusterIngressList -} diff --git a/projects/clusteringress/pkg/api/external/knative/cluster_ingress_reconciler.sk.go b/projects/clusteringress/pkg/api/external/knative/cluster_ingress_reconciler.sk.go deleted file mode 100644 index 2e199d7a1ce..00000000000 --- a/projects/clusteringress/pkg/api/external/knative/cluster_ingress_reconciler.sk.go +++ /dev/null @@ -1,47 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1alpha1 - -import ( - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/reconcile" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" -) - -// Option to copy anything from the original to the desired before writing. Return value of false means don't update -type TransitionClusterIngressFunc func(original, desired *ClusterIngress) (bool, error) - -type ClusterIngressReconciler interface { - Reconcile(namespace string, desiredResources ClusterIngressList, transition TransitionClusterIngressFunc, opts clients.ListOpts) error -} - -func clusterIngresssToResources(list ClusterIngressList) resources.ResourceList { - var resourceList resources.ResourceList - for _, clusterIngress := range list { - resourceList = append(resourceList, clusterIngress) - } - return resourceList -} - -func NewClusterIngressReconciler(client ClusterIngressClient, statusSetter resources.StatusSetter) ClusterIngressReconciler { - return &clusterIngressReconciler{ - base: reconcile.NewReconciler(client.BaseClient(), statusSetter), - } -} - -type clusterIngressReconciler struct { - base reconcile.Reconciler -} - -func (r *clusterIngressReconciler) Reconcile(namespace string, desiredResources ClusterIngressList, transition TransitionClusterIngressFunc, opts clients.ListOpts) error { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "clusterIngress_reconciler") - var transitionResources reconcile.TransitionResourcesFunc - if transition != nil { - transitionResources = func(original, desired resources.Resource) (bool, error) { - return transition(original.(*ClusterIngress), desired.(*ClusterIngress)) - } - } - return r.base.Reconcile(namespace, clusterIngresssToResources(desiredResources), transitionResources, opts) -} diff --git a/projects/clusteringress/pkg/api/v1/translator_event_loop.sk.go b/projects/clusteringress/pkg/api/v1/translator_event_loop.sk.go deleted file mode 100644 index f2affe18ec7..00000000000 --- a/projects/clusteringress/pkg/api/v1/translator_event_loop.sk.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1 - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -var ( - mTranslatorSnapshotTimeSec = stats.Float64("translator.clusteringress.gloo.solo.io/sync/time_sec", "The time taken for a given sync", "1") - mTranslatorSnapshotTimeSecView = &view.View{ - Name: "translator.clusteringress.gloo.solo.io/sync/time_sec", - Description: "The time taken for a given sync", - TagKeys: []tag.Key{tag.MustNewKey("syncer_name")}, - Measure: mTranslatorSnapshotTimeSec, - Aggregation: view.Distribution(0.01, 0.05, 0.1, 0.25, 0.5, 1, 5, 10, 60), - } -) - -func init() { - view.Register( - mTranslatorSnapshotTimeSecView, - ) -} - -type TranslatorSyncer interface { - Sync(context.Context, *TranslatorSnapshot) error -} - -type TranslatorSyncers []TranslatorSyncer - -func (s TranslatorSyncers) Sync(ctx context.Context, snapshot *TranslatorSnapshot) error { - var multiErr *multierror.Error - for _, syncer := range s { - if err := syncer.Sync(ctx, snapshot); err != nil { - multiErr = multierror.Append(multiErr, err) - } - } - return multiErr.ErrorOrNil() -} - -type translatorEventLoop struct { - emitter TranslatorSnapshotEmitter - syncer TranslatorSyncer - ready chan struct{} -} - -func NewTranslatorEventLoop(emitter TranslatorSnapshotEmitter, syncer TranslatorSyncer) eventloop.EventLoop { - return &translatorEventLoop{ - emitter: emitter, - syncer: syncer, - ready: make(chan struct{}), - } -} - -func (el *translatorEventLoop) Ready() <-chan struct{} { - return el.ready -} - -func (el *translatorEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(opts.Ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - var channelClosed bool - - // create a new context for each loop, cancel it before each loop - var cancel context.CancelFunc = func() {} - - // use closure to allow cancel function to be updated as context changes - defer func() { cancel() }() - - // cache the previous snapshot for comparison - var previousSnapshot *TranslatorSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - if syncDecider, isDecider := el.syncer.(TranslatorSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := el.syncer.(TranslatorSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(opts.Ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // cancel any open watches from previous loop - cancel() - - startTime := time.Now() - ctx, span := trace.StartSpan(opts.Ctx, "translator.clusteringress.gloo.solo.io.EventLoopSync") - ctx, canc := 
context.WithCancel(ctx) - cancel = canc - err := el.syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", el.syncer)), - }, - mTranslatorSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } else if !channelClosed { - channelClosed = true - close(el.ready) - } - - previousSnapshot = snapshot - - case <-opts.Ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/clusteringress/pkg/api/v1/translator_simple_event_loop.sk.go b/projects/clusteringress/pkg/api/v1/translator_simple_event_loop.sk.go deleted file mode 100644 index 11119072cad..00000000000 --- a/projects/clusteringress/pkg/api/v1/translator_simple_event_loop.sk.go +++ /dev/null @@ -1,134 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -// SyncDeciders Syncer which implements this interface -// can make smarter decisions over whether -// it should be restarted (including having its context cancelled) -// based on a diff of the previous and current snapshot - -// Deprecated: use TranslatorSyncDeciderWithContext -type TranslatorSyncDecider interface { - TranslatorSyncer - ShouldSync(old, new *TranslatorSnapshot) bool -} - -type TranslatorSyncDeciderWithContext interface { - TranslatorSyncer - ShouldSync(ctx context.Context, old, new *TranslatorSnapshot) bool -} - -type translatorSimpleEventLoop struct { - emitter TranslatorSimpleEmitter - syncers []TranslatorSyncer -} - -func NewTranslatorSimpleEventLoop(emitter TranslatorSimpleEmitter, syncers ...TranslatorSyncer) eventloop.SimpleEventLoop { - return &translatorSimpleEventLoop{ - emitter: emitter, - syncers: syncers, - } -} - -func (el *translatorSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(ctx) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[TranslatorSyncer]context.CancelFunc) - - // use closure to allow cancel function to be updated as context changes - defer func() { - for _, cancel := range syncerCancels { - cancel() - } - }() - - // cache the previous snapshot for comparison - var previousSnapshot *TranslatorSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - // cancel any open watches from previous loop - for _, syncer := range el.syncers { - // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(TranslatorSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if 
syncDeciderWithContext, isDecider := syncer.(TranslatorSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // if this syncer had a previous context, cancel it - cancel, ok := syncerCancels[syncer] - if ok { - cancel() - } - - startTime := time.Now() - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("translator.clusteringress.gloo.solo.io.SimpleEventLoopSync-%T", syncer)) - ctx, canc := context.WithCancel(ctx) - err := syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", syncer)), - }, - mTranslatorSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } - - syncerCancels[syncer] = canc - } - - previousSnapshot = snapshot - - case <-ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/clusteringress/pkg/api/v1/translator_snapshot.sk.go b/projects/clusteringress/pkg/api/v1/translator_snapshot.sk.go deleted file mode 100644 index 9920c81ebe0..00000000000 --- a/projects/clusteringress/pkg/api/v1/translator_snapshot.sk.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "fmt" - "hash" - "hash/fnv" - "log" - - github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - - "github.com/rotisserie/eris" - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "go.uber.org/zap" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type TranslatorSnapshot struct { - Clusteringresses github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressList -} - -func (s TranslatorSnapshot) Clone() TranslatorSnapshot { - return TranslatorSnapshot{ - Clusteringresses: s.Clusteringresses.Clone(), - } -} - -func (s TranslatorSnapshot) Hash(hasher hash.Hash64) (uint64, error) { - if hasher == nil { - hasher = fnv.New64() - } - if _, err := s.hashClusteringresses(hasher); err != nil { - return 0, err - } - return hasher.Sum64(), nil -} - -func (s TranslatorSnapshot) hashClusteringresses(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Clusteringresses.AsInterfaces()...) 
-} - -func (s TranslatorSnapshot) HashFields() []zap.Field { - var fields []zap.Field - hasher := fnv.New64() - ClusteringressesHash, err := s.hashClusteringresses(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("clusteringresses", ClusteringressesHash)) - snapshotHash, err := s.Hash(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return append(fields, zap.Uint64("snapshotHash", snapshotHash)) -} - -func (s *TranslatorSnapshot) GetResourcesList(resource resources.Resource) (resources.ResourceList, error) { - switch resource.(type) { - case *github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngress: - return s.Clusteringresses.AsResources(), nil - default: - return resources.ResourceList{}, eris.New("did not contain the input resource type returning empty list") - } -} - -func (s *TranslatorSnapshot) RemoveFromResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch resource.(type) { - case *github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngress: - - for i, res := range s.Clusteringresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Clusteringresses = append(s.Clusteringresses[:i], s.Clusteringresses[i+1:]...) - break - } - } - return nil - default: - return eris.Errorf("did not remove the resource because its type does not exist [%T]", resource) - } -} - -func (s *TranslatorSnapshot) RemoveMatches(predicate core.Predicate) { - var Clusteringresses github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressList - for _, res := range s.Clusteringresses { - if matches := predicate(res.GetMetadata()); !matches { - Clusteringresses = append(Clusteringresses, res) - } - } - s.Clusteringresses = Clusteringresses -} - -func (s *TranslatorSnapshot) UpsertToResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch typed := resource.(type) { - case *github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngress: - updated := false - for i, res := range s.Clusteringresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Clusteringresses[i] = typed - updated = true - } - } - if !updated { - s.Clusteringresses = append(s.Clusteringresses, typed) - } - s.Clusteringresses.Sort() - return nil - default: - return eris.Errorf("did not add/replace the resource type because it does not exist %T", resource) - } -} - -type TranslatorSnapshotStringer struct { - Version uint64 - Clusteringresses []string -} - -func (ss TranslatorSnapshotStringer) String() string { - s := fmt.Sprintf("TranslatorSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Clusteringresses %v\n", len(ss.Clusteringresses)) - for _, name := range ss.Clusteringresses { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s TranslatorSnapshot) Stringer() TranslatorSnapshotStringer { - snapshotHash, err := s.Hash(nil) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return TranslatorSnapshotStringer{ - Version: snapshotHash, - Clusteringresses: s.Clusteringresses.NamespacesDotNames(), - } -} - -var TranslatorGvkToHashableResource = map[schema.GroupVersionKind]func() resources.HashableResource{ - github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressGVK: 
github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.NewClusterIngressHashableResource, -} diff --git a/projects/clusteringress/pkg/api/v1/translator_snapshot_emitter.sk.go b/projects/clusteringress/pkg/api/v1/translator_snapshot_emitter.sk.go deleted file mode 100644 index 08e428e7f16..00000000000 --- a/projects/clusteringress/pkg/api/v1/translator_snapshot_emitter.sk.go +++ /dev/null @@ -1,261 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "sync" - "time" - - github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" -) - -var ( - // Deprecated. See mTranslatorResourcesIn - mTranslatorSnapshotIn = stats.Int64("translator.clusteringress.gloo.solo.io/emitter/snap_in", "Deprecated. Use translator.clusteringress.gloo.solo.io/emitter/resources_in. The number of snapshots in", "1") - - // metrics for emitter - mTranslatorResourcesIn = stats.Int64("translator.clusteringress.gloo.solo.io/emitter/resources_in", "The number of resource lists received on open watch channels", "1") - mTranslatorSnapshotOut = stats.Int64("translator.clusteringress.gloo.solo.io/emitter/snap_out", "The number of snapshots out", "1") - mTranslatorSnapshotMissed = stats.Int64("translator.clusteringress.gloo.solo.io/emitter/snap_missed", "The number of snapshots missed", "1") - - // views for emitter - // deprecated: see translatorResourcesInView - translatorsnapshotInView = &view.View{ - Name: "translator.clusteringress.gloo.solo.io/emitter/snap_in", - Measure: mTranslatorSnapshotIn, - Description: "Deprecated. Use translator.clusteringress.gloo.solo.io/emitter/resources_in. The number of snapshots updates coming in.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - - translatorResourcesInView = &view.View{ - Name: "translator.clusteringress.gloo.solo.io/emitter/resources_in", - Measure: mTranslatorResourcesIn, - Description: "The number of resource lists received on open watch channels", - Aggregation: view.Count(), - TagKeys: []tag.Key{ - skstats.NamespaceKey, - skstats.ResourceKey, - }, - } - translatorsnapshotOutView = &view.View{ - Name: "translator.clusteringress.gloo.solo.io/emitter/snap_out", - Measure: mTranslatorSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - translatorsnapshotMissedView = &view.View{ - Name: "translator.clusteringress.gloo.solo.io/emitter/snap_missed", - Measure: mTranslatorSnapshotMissed, - Description: "The number of snapshots updates going missed. this can happen in heavy load. 
missed snapshot will be re-tried after a second.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register( - translatorsnapshotInView, - translatorsnapshotOutView, - translatorsnapshotMissedView, - translatorResourcesInView, - ) -} - -type TranslatorSnapshotEmitter interface { - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TranslatorSnapshot, <-chan error, error) -} - -type TranslatorEmitter interface { - TranslatorSnapshotEmitter - Register() error - ClusterIngress() github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressClient -} - -func NewTranslatorEmitter(clusterIngressClient github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressClient) TranslatorEmitter { - return NewTranslatorEmitterWithEmit(clusterIngressClient, make(chan struct{})) -} - -func NewTranslatorEmitterWithEmit(clusterIngressClient github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressClient, emit <-chan struct{}) TranslatorEmitter { - return &translatorEmitter{ - clusterIngress: clusterIngressClient, - forceEmit: emit, - } -} - -type translatorEmitter struct { - forceEmit <-chan struct{} - clusterIngress github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressClient -} - -func (c *translatorEmitter) Register() error { - if err := c.clusterIngress.Register(); err != nil { - return err - } - return nil -} - -func (c *translatorEmitter) ClusterIngress() github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressClient { - return c.clusterIngress -} - -func (c *translatorEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TranslatorSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for ClusterIngress */ - type clusterIngressListWithNamespace struct { - list github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressList - namespace string - } - clusterIngressChan := make(chan clusterIngressListWithNamespace) - - var initialClusterIngressList github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressList - - currentSnapshot := TranslatorSnapshot{} - clusteringressesByNamespace := make(map[string]github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for ClusterIngress */ - { - clusteringresses, err := c.clusterIngress.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial ClusterIngress list") - } - initialClusterIngressList = append(initialClusterIngressList, clusteringresses...) 
- clusteringressesByNamespace[namespace] = clusteringresses - } - clusterIngressNamespacesChan, clusterIngressErrs, err := c.clusterIngress.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting ClusterIngress watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, clusterIngressErrs, namespace+"-clusteringresses") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case clusterIngressList, ok := <-clusterIngressNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case clusterIngressChan <- clusterIngressListWithNamespace{list: clusterIngressList, namespace: namespace}: - } - } - } - }(namespace) - } - /* Initialize snapshot for Clusteringresses */ - currentSnapshot.Clusteringresses = initialClusterIngressList.Sort() - - snapshots := make(chan *TranslatorSnapshot) - go func() { - // sent initial snapshot to kick off the watch - initialSnapshot := currentSnapshot.Clone() - snapshots <- &initialSnapshot - - timer := time.NewTicker(time.Second * 1) - previousHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - // this should never happen, so panic if it does - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - sentSnapshot := currentSnapshot.Clone() - select { - case snapshots <- &sentSnapshot: - stats.Record(ctx, mTranslatorSnapshotOut.M(1)) - previousHash = currentHash - default: - stats.Record(ctx, mTranslatorSnapshotMissed.M(1)) - } - } - - defer func() { - close(snapshots) - // we must wait for done before closing the error chan, - // to avoid sending on close channel. - done.Wait() - close(errs) - }() - for { - record := func() { stats.Record(ctx, mTranslatorSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case clusterIngressNamespacedList, ok := <-clusterIngressChan: - if !ok { - return - } - record() - - namespace := clusterIngressNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "cluster_ingress", - mTranslatorResourcesIn, - ) - - // merge lists by namespace - clusteringressesByNamespace[namespace] = clusterIngressNamespacedList.list - var clusterIngressList github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngressList - for _, clusteringresses := range clusteringressesByNamespace { - clusterIngressList = append(clusterIngressList, clusteringresses...) - } - currentSnapshot.Clusteringresses = clusterIngressList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/clusteringress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go b/projects/clusteringress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go deleted file mode 100644 index 1d184472983..00000000000 --- a/projects/clusteringress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1 - -import ( - "context" - "fmt" - "time" - - github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - - "go.opencensus.io/stats" - "go.uber.org/zap" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -type TranslatorSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *TranslatorSnapshot, <-chan error, error) -} - -func NewTranslatorSimpleEmitter(aggregatedWatch clients.ResourceWatch) TranslatorSimpleEmitter { - return NewTranslatorSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) -} - -func NewTranslatorSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) TranslatorSimpleEmitter { - return &translatorSimpleEmitter{ - aggregatedWatch: aggregatedWatch, - forceEmit: emit, - } -} - -type translatorSimpleEmitter struct { - forceEmit <-chan struct{} - aggregatedWatch clients.ResourceWatch -} - -func (c *translatorSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TranslatorSnapshot, <-chan error, error) { - snapshots := make(chan *TranslatorSnapshot) - errs := make(chan error) - - untyped, watchErrs, err := c.aggregatedWatch(ctx) - if err != nil { - return nil, nil, err - } - - go errutils.AggregateErrs(ctx, errs, watchErrs, "translator-emitter") - - go func() { - currentSnapshot := TranslatorSnapshot{} - timer := time.NewTicker(time.Second * 1) - var previousHash uint64 - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - previousHash = currentHash - - stats.Record(ctx, mTranslatorSnapshotOut.M(1)) - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - - defer func() { - close(snapshots) - close(errs) - }() - - for { - record := func() { stats.Record(ctx, mTranslatorSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case untypedList := <-untyped: - record() - - currentSnapshot = TranslatorSnapshot{} - for _, res := range untypedList { - switch typed := res.(type) { - case *github_com_solo_io_gloo_projects_clusteringress_pkg_api_external_knative.ClusterIngress: - currentSnapshot.Clusteringresses = append(currentSnapshot.Clusteringresses, typed) - default: - select { - case errs <- fmt.Errorf("TranslatorSnapshotEmitter "+ - "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): - case <-ctx.Done(): - return - } - } - } - - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/clusteringress/pkg/translator/translate.go b/projects/clusteringress/pkg/translator/translate.go deleted file mode 100644 index 9df9f424f4c..00000000000 --- a/projects/clusteringress/pkg/translator/translate.go +++ /dev/null @@ -1,26 +0,0 @@ -package translator - -import ( - "context" - - v1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/v1" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/gloo/projects/knative/pkg/translator" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" -) - -const ( - proxyName = "clusteringress-proxy" -) - -func translateProxy(ctx context.Context, namespace 
string, snap *v1.TranslatorSnapshot) (*gloov1.Proxy, error) { - // use map of *core.Metadata to support both Ingress and ClusterIngress, - // which share the same Spec type - ingresses := make(map[*core.Metadata]knativev1alpha1.IngressSpec) - for _, ing := range snap.Clusteringresses { - meta := ing.GetMetadata() - ingresses[meta] = ing.Spec - } - return translator.TranslateProxyFromSpecs(ctx, proxyName, namespace, ingresses) -} diff --git a/projects/clusteringress/pkg/translator/translate_test.go b/projects/clusteringress/pkg/translator/translate_test.go deleted file mode 100644 index 9bee594008f..00000000000 --- a/projects/clusteringress/pkg/translator/translate_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package translator - -import ( - "context" - "time" - - "github.com/golang/protobuf/ptypes/wrappers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/ssl" - - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/duration" - test_matchers "github.com/solo-io/solo-kit/test/matchers" - - envoycore_sk "github.com/solo-io/solo-kit/pkg/api/external/envoy/api/v2/core" - - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/core/matchers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/headers" - - "github.com/solo-io/gloo/projects/clusteringress/api/external/knative" - v1alpha12 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - v1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/v1" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "knative.dev/networking/pkg/apis/networking/v1alpha1" -) - -var _ = Describe("Translate", func() { - It("creates the appropriate proxy object for the provided ingress objects", func() { - namespace := "example" - serviceName := "peteszah-service" - serviceNamespace := "peteszah-service-namespace" - servicePort := int32(8080) - secretName := "areallygreatsecret" - ingress := &v1alpha1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing", - Namespace: namespace, - }, - Spec: v1alpha1.IngressSpec{ - Rules: []v1alpha1.IngressRule{ - { - Hosts: []string{"petes.com", "zah.net"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - AppendHeaders: map[string]string{"add": "me"}, - }, - }, - }, - }, - { - Hosts: []string{"pog.com", "champ.net", "zah.net"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/hay", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - AppendHeaders: map[string]string{"add": "me"}, - }, - }, - }, - }, - }, - }, - } - ingressTls := &v1alpha1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing-tls", - Namespace: namespace, - }, - Spec: v1alpha1.IngressSpec{ - TLS: []v1alpha1.IngressTLS{ - { - Hosts: []string{"petes.com"}, - SecretName: secretName, - }, - }, - Rules: 
[]v1alpha1.IngressRule{ - { - Hosts: []string{"petes.com", "zah.net"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - AppendHeaders: map[string]string{"add": "me"}, - }, - }, - }, - }, - }, - }, - } - ingressRes := &v1alpha12.ClusterIngress{ClusterIngress: knative.ClusterIngress(*ingress)} - ingressResTls := &v1alpha12.ClusterIngress{ClusterIngress: knative.ClusterIngress(*ingressTls)} - snap := &v1.TranslatorSnapshot{ - Clusteringresses: v1alpha12.ClusterIngressList{ingressRes, ingressResTls}, - } - proxy, errs := translateProxy(context.TODO(), namespace, snap) - Expect(errs).NotTo(HaveOccurred()) - Expect(proxy.Metadata.Name).To(Equal("clusteringress-proxy")) - Expect(proxy.Listeners).To(HaveLen(2)) - Expect(proxy.Listeners[0].Name).To(Equal("http")) - Expect(proxy.Listeners[0].BindPort).To(Equal(uint32(8080))) - - expected := &gloov1.Proxy{ - Listeners: []*gloov1.Listener{ - { - Name: "http", - BindAddress: "::", - BindPort: 8080, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: []*gloov1.VirtualHost{ - { - Name: "example.ing-0", - Domains: []string{ - "petes.com", - "petes.com:8080", - "zah.net", - "zah.net:8080", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - Destinations: []*gloov1.WeightedDestination{ - { - Destination: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{ - Name: "peteszah-service", - Namespace: "peteszah-service-namespace", - }, - Port: 8080, - }, - }, - }, - Weight: &wrappers.UInt32Value{Value: 0x00000064}, - }, - }, - }, - }, - }, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: &headers.HeaderManipulation{ - RequestHeadersToAdd: []*envoycore_sk.HeaderValueOption{{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: "add", Value: "me"}}}}, - }, - }, - }, - }, - }, - { - Name: "example.ing-1", - Domains: []string{ - "champ.net", - "champ.net:8080", - "pog.com", - "pog.com:8080", - "zah.net", - "zah.net:8080", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/hay", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - Destinations: []*gloov1.WeightedDestination{ - { - Destination: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{ - Name: "peteszah-service", - Namespace: "peteszah-service-namespace", - }, - Port: 8080, - }, - }, - }, - Weight: &wrappers.UInt32Value{Value: 0x00000064}, - }, - }, - }, - }, - }, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: &headers.HeaderManipulation{ - RequestHeadersToAdd: []*envoycore_sk.HeaderValueOption{{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: "add", Value: "me"}}}}, - }, - }, - }, - 
}, - }, - }, - }, - }, - }, - { - Name: "https", - BindAddress: "::", - BindPort: 8443, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: []*gloov1.VirtualHost{ - { - Name: "example.ing-tls-0", - Domains: []string{ - "petes.com", - "petes.com:8443", - "zah.net", - "zah.net:8443", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - Destinations: []*gloov1.WeightedDestination{ - { - Destination: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{ - Name: "peteszah-service", - Namespace: "peteszah-service-namespace", - }, - Port: 8080, - }, - }, - }, - Weight: &wrappers.UInt32Value{Value: 0x00000064}, - }, - }, - }, - }, - }, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: &headers.HeaderManipulation{ - RequestHeadersToAdd: []*envoycore_sk.HeaderValueOption{{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: "add", Value: "me"}}}}, - }, - }, - }, - }, - }, - }, - }, - }, - SslConfigurations: []*ssl.SslConfig{ - { - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &core.ResourceRef{ - Name: "areallygreatsecret", - Namespace: "example", - }, - }, - SniDomains: []string{ - "petes.com", - }, - }, - }, - }, - }, - Metadata: &core.Metadata{ - Name: "clusteringress-proxy", - Namespace: "example", - }, - } - - Expect(proxy).To(test_matchers.MatchProto(expected)) - }) -}) - -func durptr(d int) *duration.Duration { - dur := time.Duration(d) - return ptypes.DurationProto(dur) -} diff --git a/projects/clusteringress/pkg/translator/translator_suite_test.go b/projects/clusteringress/pkg/translator/translator_suite_test.go deleted file mode 100644 index 677a31d29cf..00000000000 --- a/projects/clusteringress/pkg/translator/translator_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package translator_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -func TestTranslator(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Translator Suite") -} diff --git a/projects/clusteringress/pkg/translator/translator_syncer.go b/projects/clusteringress/pkg/translator/translator_syncer.go deleted file mode 100644 index 1f52cd72514..00000000000 --- a/projects/clusteringress/pkg/translator/translator_syncer.go +++ /dev/null @@ -1,182 +0,0 @@ -package translator - -import ( - "context" - "time" - - "github.com/rotisserie/eris" - "go.uber.org/zap/zapcore" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" - knativeclient "knative.dev/networking/pkg/client/clientset/versioned/typed/networking/v1alpha1" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - - "github.com/solo-io/gloo/pkg/utils/syncutil" - v1alpha1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - v1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/v1" - "github.com/solo-io/gloo/projects/gateway/pkg/utils" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - glooutils "github.com/solo-io/gloo/projects/gloo/pkg/utils" -) - -type translatorSyncer struct { - proxyAddress string - writeNamespace string - writeErrs chan error - proxyClient gloov1.ProxyClient - proxyReconciler gloov1.ProxyReconciler - ingressClient knativeclient.IngressesGetter - statusClient resources.StatusClient -} - -var ( - // labels used to uniquely identify Proxies that are managed by the Gloo controllers - proxyLabelsToWrite = map[string]string{ - glooutils.ProxyTypeKey: glooutils.KnativeProxyValue, - } - - // Previously, proxies would be identified with: - // created_by: knative - // Now, proxies are identified with: - // created_by: gloo-knative - // - // We need to ensure that users can successfully upgrade from versions - // where the previous labels were used, to versions with the new labels. - // Therefore, we watch Proxies with a superset of the old and new labels, and persist Proxies with new labels. - // - // This is only required for backwards compatibility. - // Once users have upgraded to a version with new labels, we can delete this code and read/write the same labels. 
- // gloo-knative-translator removed in 1.17 - // knative removed in 1.12 - proxyLabelSelectorOptions = clients.ListOpts{ - ExpressionSelector: glooutils.GetTranslatorSelectorExpression(glooutils.KnativeProxyValue, "gloo-knative-translator", "knative"), - } -) - -func NewSyncer(proxyAddress, writeNamespace string, proxyClient gloov1.ProxyClient, ingressClient knativeclient.IngressesGetter, statusClient resources.StatusClient, writeErrs chan error) v1.TranslatorSyncer { - return &translatorSyncer{ - proxyAddress: proxyAddress, - writeNamespace: writeNamespace, - writeErrs: writeErrs, - proxyClient: proxyClient, - ingressClient: ingressClient, - proxyReconciler: gloov1.NewProxyReconciler(proxyClient, statusClient), - statusClient: statusClient, - } -} - -// TODO (ilackarms): make sure that sync happens if proxies get updated as well; may need to resync -func (s *translatorSyncer) Sync(ctx context.Context, snap *v1.TranslatorSnapshot) error { - ctx = contextutils.WithLogger(ctx, "translatorSyncer") - - snapHash := hashutils.MustHash(snap) - logger := contextutils.LoggerFrom(ctx) - logger.Infof("begin sync %v (%v cluster ingresses )", snapHash, - len(snap.Clusteringresses), - ) - defer logger.Infof("end sync %v", snapHash) - - // stringifying the snapshot may be an expensive operation, so we'd like to avoid building the large - // string if we're not even going to log it anyway - if contextutils.GetLogLevel() == zapcore.DebugLevel { - logger.Debug(syncutil.StringifySnapshot(snap)) - } - - proxy, err := translateProxy(ctx, s.writeNamespace, snap) - if err != nil { - logger.Warnf("snapshot %v was rejected due to invalid config: %v\n"+ - "knative ingress proxy will not be updated.", snapHash, err) - return err - } - - var desiredResources gloov1.ProxyList - if proxy != nil { - logger.Infof("creating proxy %v", proxy.GetMetadata().Ref()) - proxy.GetMetadata().Labels = proxyLabelsToWrite - desiredResources = gloov1.ProxyList{proxy} - } - - proxyTransitionFunction := utils.TransitionFunction(s.statusClient) - - if err := s.proxyReconciler.Reconcile(s.writeNamespace, desiredResources, proxyTransitionFunction, clients.ListOpts{ - Ctx: ctx, - Selector: proxyLabelSelectorOptions.Selector, - ExpressionSelector: proxyLabelSelectorOptions.ExpressionSelector, - }); err != nil { - return err - } - - if err := s.propagateProxyStatus(ctx, proxy, snap.Clusteringresses); err != nil { - return eris.Wrapf(err, "failed to propagate proxy status "+ - "to clusteringress objects") - } - - return nil -} - -// propagate to all clusteringresses the status of the proxy -func (s *translatorSyncer) propagateProxyStatus(ctx context.Context, proxy *gloov1.Proxy, clusterIngresses v1alpha1.ClusterIngressList) error { - if proxy == nil { - return nil - } - timeout := time.After(time.Second * 30) - ticker := time.Tick(time.Second / 2) - for { - select { - case <-ctx.Done(): - return nil - case <-timeout: - return eris.Errorf("timed out waiting for proxy status to be updated") - case <-ticker: - // poll the proxy for an accepted or rejected status - updatedProxy, err := s.proxyClient.Read( - proxy.GetMetadata().GetNamespace(), - proxy.GetMetadata().GetName(), - clients.ReadOpts{Ctx: ctx}, - ) - if err != nil { - return err - } - proxyStatus := s.statusClient.GetStatus(updatedProxy) - - switch proxyStatus.GetState() { - case core.Status_Pending: - continue - case core.Status_Rejected: - contextutils.LoggerFrom(ctx).Errorf("proxy was rejected by gloo: %v", proxyStatus.GetReason()) - return nil - case core.Status_Accepted: - return 
s.markClusterIngressesReady(ctx, clusterIngresses) - } - } - } -} - -func (s *translatorSyncer) markClusterIngressesReady(ctx context.Context, clusterIngresses v1alpha1.ClusterIngressList) error { - var updatedClusterIngresses []*knativev1alpha1.Ingress - for _, wrappedCi := range clusterIngresses { - ci := knativev1alpha1.Ingress(wrappedCi.ClusterIngress) - if ci.Status.ObservedGeneration == ci.ObjectMeta.Generation { - continue - } - ci.Status.InitializeConditions() - ci.Status.MarkNetworkConfigured() - lb := []knativev1alpha1.LoadBalancerIngressStatus{ - {DomainInternal: s.proxyAddress}, - } - ci.Status.MarkLoadBalancerReady(lb, lb) - ci.Status.ObservedGeneration = ci.Generation - updatedClusterIngresses = append(updatedClusterIngresses, &ci) - } - for _, ci := range updatedClusterIngresses { - if _, err := s.ingressClient.Ingresses(ci.Namespace).UpdateStatus(ctx, ci, metav1.UpdateOptions{}); err != nil { - contextutils.LoggerFrom(ctx).Errorf("failed to update ClusterIngress %v status with error %v", ci.Name, err) - } - } - return nil -} diff --git a/projects/clusteringress/pkg/translator/translator_syncer_test.go b/projects/clusteringress/pkg/translator/translator_syncer_test.go deleted file mode 100644 index 6b769d00f79..00000000000 --- a/projects/clusteringress/pkg/translator/translator_syncer_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package translator - -import ( - "context" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - v1alpha12 "knative.dev/networking/pkg/apis/networking/v1alpha1" - alpha1 "knative.dev/networking/pkg/client/clientset/versioned/typed/networking/v1alpha1" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - - gloostatusutils "github.com/solo-io/gloo/pkg/utils/statusutils" - "github.com/solo-io/gloo/projects/clusteringress/api/external/knative" - v1alpha1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - v1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" -) - -var _ = Describe("TranslatorSyncer", func() { - It("propagates successful proxy status to the clusteringresses it was created from", func() { - ctx, cancel := context.WithCancel(context.Background()) - defer func() { cancel() }() - - proxyAddress := "proxy-address" - namespace := "write-namespace" - - statusClient := gloostatusutils.GetStatusClientFromEnvOrDefault(namespace) - proxyClient, _ := v1.NewProxyClient(ctx, &factory.MemoryResourceClientFactory{Cache: memory.NewInMemoryResourceCache()}) - clusterIngress := &v1alpha1.ClusterIngress{ClusterIngress: knative.ClusterIngress{ - ObjectMeta: metav1.ObjectMeta{Generation: 1}, - }} - - knativeClient := &mockIngressesGetter{ - ciClient: &mockCiClient{ci: toKube(clusterIngress)}} - - syncer := NewSyncer(proxyAddress, namespace, proxyClient, knativeClient, statusClient, make(chan error)).(*translatorSyncer) - proxy := &v1.Proxy{Metadata: &core.Metadata{Name: "hi", Namespace: "howareyou"}} - proxy, _ = proxyClient.Write(proxy, clients.WriteOpts{}) - - go func() { - defer GinkgoRecover() - // update status after a 1s sleep - time.Sleep(time.Second / 5) - statusClient.SetStatus(proxy, &core.Status{ - State: core.Status_Accepted, - }) - _, err := proxyClient.Write(proxy, clients.WriteOpts{OverwriteExisting: true}) - 
Expect(err).NotTo(HaveOccurred()) - }() - - err := syncer.propagateProxyStatus(context.TODO(), proxy, v1alpha1.ClusterIngressList{clusterIngress}) - Expect(err).NotTo(HaveOccurred()) - - var ci *v1alpha12.Ingress - ci, err = knativeClient.ciClient.Get(ctx, clusterIngress.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - Expect(ci.IsReady()).To(BeTrue()) - }) -}) - -type mockIngressesGetter struct{ ciClient alpha1.IngressInterface } - -func (m *mockIngressesGetter) Ingresses(namespace string) alpha1.IngressInterface { - return m.ciClient -} - -func toKube(ci *v1alpha1.ClusterIngress) *v1alpha12.Ingress { - kubeCi := v1alpha12.Ingress(ci.ClusterIngress) - return &kubeCi -} - -type mockCiClient struct{ ci *v1alpha12.Ingress } - -func (c *mockCiClient) UpdateStatus(ctx context.Context, ingress *v1alpha12.Ingress, opts metav1.UpdateOptions) (*v1alpha12.Ingress, error) { - c.ci.Status = ingress.Status - return ingress, nil -} - -func (*mockCiClient) Create(ctx context.Context, ingress *v1alpha12.Ingress, opts metav1.CreateOptions) (*v1alpha12.Ingress, error) { - panic("implement me") -} - -func (*mockCiClient) Update(ctx context.Context, ingress *v1alpha12.Ingress, opts metav1.UpdateOptions) (*v1alpha12.Ingress, error) { - panic("implement me") -} - -func (*mockCiClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - panic("implement me") -} - -func (*mockCiClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - panic("implement me") -} - -func (c *mockCiClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1alpha12.Ingress, error) { - return c.ci, nil -} - -func (*mockCiClient) List(ctx context.Context, opts metav1.ListOptions) (*v1alpha12.IngressList, error) { - panic("implement me") -} - -func (*mockCiClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - panic("implement me") -} - -func (*mockCiClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1alpha12.Ingress, err error) { - panic("implement me") -} diff --git a/projects/ingress/api/v1/ingress.proto b/projects/ingress/api/v1/ingress.proto deleted file mode 100644 index e6cb95b3123..00000000000 --- a/projects/ingress/api/v1/ingress.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; -package ingress.solo.io; -option go_package = "github.com/solo-io/gloo/projects/ingress/pkg/api/v1"; - -import "google/protobuf/any.proto"; - -import "github.com/solo-io/solo-kit/api/v1/metadata.proto"; -import "github.com/solo-io/solo-kit/api/v1/solo-kit.proto"; -import "extproto/ext.proto"; -option (extproto.hash_all) = true; -option (extproto.clone_all) = true; -option (extproto.equal_all) = true; -/* -A simple wrapper for a Kubernetes Ingress Object. 
-*/ -message Ingress { - - option (core.solo.io.resource).short_name = "ig"; - option (core.solo.io.resource).plural_name = "ingresses"; - // a raw byte representation of the kubernetes ingress this resource wraps - google.protobuf.Any kube_ingress_spec = 1; - // a raw byte representation of the ingress status of the kubernetes ingress object - google.protobuf.Any kube_ingress_status = 2 [(extproto.skip_hashing) = true]; - - // Metadata contains the object metadata for this resource - core.solo.io.Metadata metadata = 7; -} \ No newline at end of file diff --git a/projects/ingress/api/v1/service.proto b/projects/ingress/api/v1/service.proto deleted file mode 100644 index e41a1feb006..00000000000 --- a/projects/ingress/api/v1/service.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; -package ingress.solo.io; -option go_package = "github.com/solo-io/gloo/projects/ingress/pkg/api/v1"; - -import "google/protobuf/any.proto"; - -import "github.com/solo-io/solo-kit/api/v1/metadata.proto"; -import "github.com/solo-io/solo-kit/api/v1/solo-kit.proto"; - -import "extproto/ext.proto"; -option (extproto.hash_all) = true; -option (extproto.clone_all) = true; -option (extproto.equal_all) = true; -/* -A simple wrapper for a Kubernetes Service Object. -*/ -message KubeService{ - - option (core.solo.io.resource).short_name = "sv"; - option (core.solo.io.resource).plural_name = "services"; - // a raw byte representation of the kubernetes service this resource wraps - google.protobuf.Any kube_service_spec = 1; - // a raw byte representation of the service status of the kubernetes service object - google.protobuf.Any kube_service_status = 2; - - // Metadata contains the object metadata for this resource - core.solo.io.Metadata metadata = 7; -} diff --git a/projects/ingress/api/v1/solo-kit.json b/projects/ingress/api/v1/solo-kit.json deleted file mode 100644 index 50f82e4f592..00000000000 --- a/projects/ingress/api/v1/solo-kit.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "name": "ingress.solo.io", - "version": "v1", - "resource_groups": { - "translator.ingress.solo.io": [ - { - "name": "Upstream", - "package": "gloo.solo.io" - }, - { - "name": "KubeService", - "package": "ingress.solo.io" - }, - { - "name": "Ingress", - "package": "ingress.solo.io" - } - ], - "status.ingress.solo.io": [ - { - "name": "KubeService", - "package": "ingress.solo.io" - }, - { - "name": "Ingress", - "package": "ingress.solo.io" - } - ] - } -} \ No newline at end of file diff --git a/projects/ingress/cmd/Dockerfile b/projects/ingress/cmd/Dockerfile deleted file mode 100644 index df8ae4120b4..00000000000 --- a/projects/ingress/cmd/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -ARG BASE_IMAGE - -FROM $BASE_IMAGE - -ARG GOARCH=amd64 - -RUN apk -U upgrade - -COPY ingress-linux-$GOARCH /usr/local/bin/ingress - -USER 10101 - -ENTRYPOINT ["/usr/local/bin/ingress"] \ No newline at end of file diff --git a/projects/ingress/cmd/Dockerfile.distroless b/projects/ingress/cmd/Dockerfile.distroless deleted file mode 100644 index 1e6c9b6e4ab..00000000000 --- a/projects/ingress/cmd/Dockerfile.distroless +++ /dev/null @@ -1,11 +0,0 @@ -ARG BASE_IMAGE - -FROM $BASE_IMAGE - -ARG GOARCH=amd64 - -COPY ingress-linux-$GOARCH /usr/local/bin/ingress - -USER 10101 - -ENTRYPOINT ["/usr/local/bin/ingress"] \ No newline at end of file diff --git a/projects/ingress/cmd/main.go b/projects/ingress/cmd/main.go deleted file mode 100644 index 242f26d740e..00000000000 --- a/projects/ingress/cmd/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - 
"github.com/solo-io/gloo/projects/ingress/pkg/setup" - "github.com/solo-io/go-utils/log" -) - -func main() { - if err := setup.Main(nil); err != nil { - log.Fatalf("err in main: %v", err.Error()) - } -} diff --git a/projects/ingress/pkg/api/ingress/resource_client.go b/projects/ingress/pkg/api/ingress/resource_client.go deleted file mode 100644 index 726a89eeb98..00000000000 --- a/projects/ingress/pkg/api/ingress/resource_client.go +++ /dev/null @@ -1,324 +0,0 @@ -package ingress - -import ( - "context" - "encoding/json" - "reflect" - "sort" - "time" - - "github.com/golang/protobuf/ptypes/any" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" - "github.com/solo-io/solo-kit/pkg/utils/kubeutils" - networkingv1 "k8s.io/api/networking/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - kubewatch "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" -) - -const typeUrl = "k8s.io/networking.v1/Ingress" - -type ResourceClient struct { - kube kubernetes.Interface - ownerLabel string - resourceName string - resourceType resources.Resource -} - -func NewResourceClient(kube kubernetes.Interface, resourceType resources.Resource) *ResourceClient { - return &ResourceClient{ - kube: kube, - resourceName: reflect.TypeOf(resourceType).String(), - resourceType: resourceType, - } -} - -func FromKube(ingress *networkingv1.Ingress) (*v1.Ingress, error) { - rawSpec, err := json.Marshal(ingress.Spec) - if err != nil { - return nil, errors.Wrapf(err, "marshalling kube ingress object") - } - spec := &any.Any{ - TypeUrl: typeUrl, - Value: rawSpec, - } - - rawStatus, err := json.Marshal(ingress.Status) - if err != nil { - return nil, errors.Wrapf(err, "marshalling kube ingress object") - } - status := &any.Any{ - TypeUrl: typeUrl, - Value: rawStatus, - } - - resource := &v1.Ingress{ - KubeIngressSpec: spec, - KubeIngressStatus: status, - } - - resource.SetMetadata(kubeutils.FromKubeMeta(ingress.ObjectMeta, true)) - - return resource, nil -} - -func ToKube(resource resources.Resource) (*networkingv1.Ingress, error) { - ingResource, ok := resource.(*v1.Ingress) - if !ok { - return nil, errors.Errorf("internal error: invalid resource %v passed to ingress-only client", resources.Kind(resource)) - } - if ingResource.GetKubeIngressSpec() == nil { - return nil, errors.Errorf("internal error: %v ingress spec cannot be nil", ingResource.GetMetadata().Ref()) - } - var ingress networkingv1.Ingress - if err := json.Unmarshal(ingResource.GetKubeIngressSpec().GetValue(), &ingress.Spec); err != nil { - return nil, errors.Wrapf(err, "unmarshalling kube ingress spec data") - } - if ingResource.GetKubeIngressStatus() != nil { - if err := json.Unmarshal(ingResource.GetKubeIngressStatus().GetValue(), &ingress.Status); err != nil { - return nil, errors.Wrapf(err, "unmarshalling kube ingress status data") - } - } - - meta := kubeutils.ToKubeMeta(resource.GetMetadata()) - if meta.Annotations == nil { - meta.Annotations = make(map[string]string) - } - ingress.ObjectMeta = meta - return &ingress, nil -} - -var _ clients.ResourceClient = &ResourceClient{} - -func (rc *ResourceClient) Kind() string { - return resources.Kind(rc.resourceType) -} - -func (rc *ResourceClient) NewResource() resources.Resource { - return resources.Clone(rc.resourceType) -} - -func (rc *ResourceClient) Register() error 
{ - return nil -} - -func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { - if err := resources.ValidateName(name); err != nil { - return nil, errors.Wrapf(err, "validation error") - } - opts = opts.WithDefaults() - namespace = clients.DefaultNamespaceIfEmpty(namespace) - - ingressObj, err := rc.kube.NetworkingV1().Ingresses(namespace).Get(opts.Ctx, name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, errors.NewNotExistErr(namespace, name, err) - } - return nil, errors.Wrapf(err, "reading ingressObj from kubernetes") - } - resource, err := FromKube(ingressObj) - if err != nil { - return nil, err - } - if resource == nil { - return nil, errors.Errorf("ingressObj %v is not kind %v", name, rc.Kind()) - } - return resource, nil -} - -func (rc *ResourceClient) Write(resource resources.Resource, opts clients.WriteOpts) (resources.Resource, error) { - updated, err := rc.write(resource, opts) - if err != nil { - return nil, err - } - // workaround for setting ingress status - clone := resources.Clone(resource) - clone.SetMetadata(updated.GetMetadata()) - return rc.writeStatus(clone, opts) -} - -func (rc *ResourceClient) ApplyStatus(statusClient resources.StatusClient, inputResource resources.InputResource, opts clients.ApplyStatusOpts) (resources.Resource, error) { - wopts := clients.WriteOpts{} - wopts = wopts.WithDefaults() - wopts.Ctx = opts.Ctx - return rc.writeStatus(inputResource, wopts) -} - -func (rc *ResourceClient) write(resource resources.Resource, opts clients.WriteOpts) (resources.Resource, error) { - opts = opts.WithDefaults() - if err := resources.Validate(resource); err != nil { - return nil, errors.Wrapf(err, "validation error") - } - meta := resource.GetMetadata() - meta.Namespace = clients.DefaultNamespaceIfEmpty(meta.GetNamespace()) - - // mutate and return clone - clone := resources.Clone(resource) - clone.SetMetadata(meta) - ingressObj, err := ToKube(resource) - if err != nil { - return nil, err - } - - original, err := rc.Read(meta.GetNamespace(), meta.GetName(), clients.ReadOpts{ - Ctx: opts.Ctx, - }) - if original != nil && err == nil { - if !opts.OverwriteExisting { - return nil, errors.NewExistErr(meta) - } - if meta.GetResourceVersion() != original.GetMetadata().GetResourceVersion() { - return nil, errors.NewResourceVersionErr(meta.GetNamespace(), meta.GetName(), meta.GetResourceVersion(), original.GetMetadata().GetResourceVersion()) - } - if _, err := rc.kube.NetworkingV1().Ingresses(ingressObj.Namespace).Update(opts.Ctx, ingressObj, metav1.UpdateOptions{}); err != nil { - return nil, errors.Wrapf(err, "updating kube ingressObj %v", ingressObj.Name) - } - } else { - if _, err := rc.kube.NetworkingV1().Ingresses(ingressObj.Namespace).Create(opts.Ctx, ingressObj, metav1.CreateOptions{}); err != nil { - return nil, errors.Wrapf(err, "creating kube ingressObj %v", ingressObj.Name) - } - } - - // return a read object to update the resource version - return rc.Read(ingressObj.Namespace, ingressObj.Name, clients.ReadOpts{Ctx: opts.Ctx}) -} - -func (rc *ResourceClient) writeStatus(resource resources.Resource, opts clients.WriteOpts) (resources.Resource, error) { - opts = opts.WithDefaults() - if err := resources.Validate(resource); err != nil { - return nil, errors.Wrapf(err, "validation error") - } - meta := resource.GetMetadata() - meta.Namespace = clients.DefaultNamespaceIfEmpty(meta.GetNamespace()) - - // mutate and return clone - clone := resources.Clone(resource) - 
clone.SetMetadata(meta) - ingressObj, err := ToKube(resource) - if err != nil { - return nil, err - } - - original, err := rc.Read(meta.GetNamespace(), meta.GetName(), clients.ReadOpts{ - Ctx: opts.Ctx, - }) - if original != nil && err == nil { - if !opts.OverwriteExisting { - return nil, errors.NewExistErr(meta) - } - if meta.GetResourceVersion() != original.GetMetadata().GetResourceVersion() { - return nil, errors.NewResourceVersionErr(meta.GetNamespace(), meta.GetName(), meta.GetResourceVersion(), original.GetMetadata().GetResourceVersion()) - } - if _, err := rc.kube.NetworkingV1().Ingresses(ingressObj.Namespace).UpdateStatus(opts.Ctx, ingressObj, metav1.UpdateOptions{}); err != nil { - return nil, errors.Wrapf(err, "updating kube ingressObj status %v", ingressObj.Name) - } - } else { - if _, err := rc.kube.NetworkingV1().Ingresses(ingressObj.Namespace).Create(opts.Ctx, ingressObj, metav1.CreateOptions{}); err != nil { - return nil, errors.Wrapf(err, "creating kube ingressObj status %v", ingressObj.Name) - } - } - - // return a read object to update the resource version - return rc.Read(ingressObj.Namespace, ingressObj.Name, clients.ReadOpts{Ctx: opts.Ctx}) -} - -func (rc *ResourceClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - opts = opts.WithDefaults() - if !rc.exist(opts.Ctx, namespace, name) { - if !opts.IgnoreNotExist { - return errors.NewNotExistErr(namespace, name) - } - return nil - } - - if err := rc.kube.NetworkingV1().Ingresses(namespace).Delete(opts.Ctx, name, metav1.DeleteOptions{}); err != nil { - return errors.Wrapf(err, "deleting ingressObj %v", name) - } - return nil -} - -func (rc *ResourceClient) List(namespace string, opts clients.ListOpts) (resources.ResourceList, error) { - opts = opts.WithDefaults() - - ingressObjList, err := rc.kube.NetworkingV1().Ingresses(namespace).List(opts.Ctx, metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(opts.Selector).String(), - }) - if err != nil { - return nil, errors.Wrapf(err, "listing ingressObjs in %v", namespace) - } - var resourceList resources.ResourceList - for _, ingressObj := range ingressObjList.Items { - resource, err := FromKube(&ingressObj) - if err != nil { - return nil, err - } - if resource == nil { - continue - } - resourceList = append(resourceList, resource) - } - - sort.SliceStable(resourceList, func(i, j int) bool { - return resourceList[i].GetMetadata().GetName() < resourceList[j].GetMetadata().GetName() - }) - - return resourceList, nil -} - -func (rc *ResourceClient) Watch(namespace string, opts clients.WatchOpts) (<-chan resources.ResourceList, <-chan error, error) { - opts = opts.WithDefaults() - watch, err := rc.kube.NetworkingV1().Ingresses(namespace).Watch(opts.Ctx, metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(opts.Selector).String(), - }) - if err != nil { - return nil, nil, errors.Wrapf(err, "initiating kube watch in %v", namespace) - } - resourcesChan := make(chan resources.ResourceList) - errs := make(chan error) - updateResourceList := func() { - list, err := rc.List(namespace, clients.ListOpts{ - Ctx: opts.Ctx, - Selector: opts.Selector, - }) - if err != nil { - errs <- err - return - } - resourcesChan <- list - } - - go func() { - // watch should open up with an initial read - updateResourceList() - for { - select { - case <-time.After(opts.RefreshRate): - updateResourceList() - case event := <-watch.ResultChan(): - switch event.Type { - case kubewatch.Error: - errs <- errors.Errorf("error during watch: %v", event) - default: - 
updateResourceList() - } - case <-opts.Ctx.Done(): - watch.Stop() - close(resourcesChan) - close(errs) - return - } - } - }() - - return resourcesChan, errs, nil -} - -func (rc *ResourceClient) exist(ctx context.Context, namespace, name string) bool { - _, err := rc.kube.NetworkingV1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) - return err == nil -} diff --git a/projects/ingress/pkg/api/service/resource_client.go b/projects/ingress/pkg/api/service/resource_client.go deleted file mode 100644 index 2bdc0af7b60..00000000000 --- a/projects/ingress/pkg/api/service/resource_client.go +++ /dev/null @@ -1,299 +0,0 @@ -package service - -import ( - "context" - "encoding/json" - "reflect" - "sort" - "time" - - "github.com/golang/protobuf/ptypes/any" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/solo-kit/pkg/api/shared" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" - "github.com/solo-io/solo-kit/pkg/utils/kubeutils" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - kubewatch "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" -) - -const typeUrl = "k8s.io/core.v1/Service" - -type ResourceClient struct { - kube kubernetes.Interface - ownerLabel string - resourceName string - resourceType resources.Resource -} - -func NewResourceClient(kube kubernetes.Interface, resourceType resources.Resource) *ResourceClient { - return &ResourceClient{ - kube: kube, - resourceName: reflect.TypeOf(resourceType).String(), - resourceType: resourceType, - } -} - -func FromKube(svc *corev1.Service) (*v1.KubeService, error) { - rawSpec, err := json.Marshal(svc.Spec) - if err != nil { - return nil, errors.Wrapf(err, "marshalling kube svc object") - } - spec := &any.Any{ - TypeUrl: typeUrl, - Value: rawSpec, - } - - rawStatus, err := json.Marshal(svc.Status) - if err != nil { - return nil, errors.Wrapf(err, "marshalling kube svc object") - } - status := &any.Any{ - TypeUrl: typeUrl, - Value: rawStatus, - } - - resource := &v1.KubeService{ - KubeServiceSpec: spec, - KubeServiceStatus: status, - } - - resource.SetMetadata(kubeutils.FromKubeMeta(svc.ObjectMeta, true)) - - return resource, nil -} - -func ToKube(resource resources.Resource) (*corev1.Service, error) { - ingResource, ok := resource.(*v1.KubeService) - if !ok { - return nil, errors.Errorf("internal error: invalid resource %v passed to svc-only client", resources.Kind(resource)) - } - if ingResource.GetKubeServiceSpec() == nil { - return nil, errors.Errorf("internal error: %v svc spec cannot be nil", ingResource.GetMetadata().Ref()) - } - var svc corev1.Service - if err := json.Unmarshal(ingResource.GetKubeServiceSpec().GetValue(), &svc.Spec); err != nil { - return nil, errors.Wrapf(err, "unmarshalling kube svc spec data") - } - if ingResource.GetKubeServiceStatus() != nil { - if err := json.Unmarshal(ingResource.GetKubeServiceStatus().GetValue(), &svc.Status); err != nil { - return nil, errors.Wrapf(err, "unmarshalling kube svc status data") - } - } - - meta := kubeutils.ToKubeMeta(resource.GetMetadata()) - if meta.Annotations == nil { - meta.Annotations = make(map[string]string) - } - svc.ObjectMeta = meta - return &svc, nil -} - -var _ clients.ResourceClient = &ResourceClient{} - -func (rc *ResourceClient) Kind() string { - return 
resources.Kind(rc.resourceType) -} - -func (rc *ResourceClient) NewResource() resources.Resource { - return resources.Clone(rc.resourceType) -} - -func (rc *ResourceClient) Register() error { - return nil -} - -func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { - if err := resources.ValidateName(name); err != nil { - return nil, errors.Wrapf(err, "validation error") - } - opts = opts.WithDefaults() - namespace = clients.DefaultNamespaceIfEmpty(namespace) - - svcObj, err := rc.kube.CoreV1().Services(namespace).Get(opts.Ctx, name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, errors.NewNotExistErr(namespace, name, err) - } - return nil, errors.Wrapf(err, "reading svcObj from kubernetes") - } - resource, err := FromKube(svcObj) - if err != nil { - return nil, err - } - if resource == nil { - return nil, errors.Errorf("svcObj %v is not kind %v", name, rc.Kind()) - } - return resource, nil -} - -func (rc *ResourceClient) Write(resource resources.Resource, opts clients.WriteOpts) (resources.Resource, error) { - opts = opts.WithDefaults() - if err := resources.Validate(resource); err != nil { - return nil, errors.Wrapf(err, "validation error") - } - meta := resource.GetMetadata() - meta.Namespace = clients.DefaultNamespaceIfEmpty(meta.GetNamespace()) - - // mutate and return clone - clone := resources.Clone(resource) - clone.SetMetadata(meta) - svcObj, err := ToKube(resource) - if err != nil { - return nil, err - } - - original, err := rc.Read(meta.GetNamespace(), meta.GetName(), clients.ReadOpts{ - Ctx: opts.Ctx, - }) - if original != nil && err == nil { - if !opts.OverwriteExisting { - return nil, errors.NewExistErr(meta) - } - if meta.GetResourceVersion() != original.GetMetadata().GetResourceVersion() { - return nil, errors.NewResourceVersionErr(meta.GetNamespace(), meta.GetName(), meta.GetResourceVersion(), original.GetMetadata().GetResourceVersion()) - } - if _, err := rc.kube.CoreV1().Services(svcObj.Namespace).Update(opts.Ctx, svcObj, metav1.UpdateOptions{}); err != nil { - return nil, errors.Wrapf(err, "updating kube svcObj %v", svcObj.Name) - } - } else { - if _, err := rc.kube.CoreV1().Services(svcObj.Namespace).Create(opts.Ctx, svcObj, metav1.CreateOptions{}); err != nil { - return nil, errors.Wrapf(err, "creating kube svcObj %v", svcObj.Name) - } - } - - // return a read object to update the resource version - return rc.Read(svcObj.Namespace, svcObj.Name, clients.ReadOpts{Ctx: opts.Ctx}) -} - -func (rc *ResourceClient) ApplyStatus(statusClient resources.StatusClient, inputResource resources.InputResource, opts clients.ApplyStatusOpts) (resources.Resource, error) { - name := inputResource.GetMetadata().GetName() - namespace := inputResource.GetMetadata().GetNamespace() - if err := resources.ValidateName(name); err != nil { - return nil, errors.Wrapf(err, "validation error") - } - opts = opts.WithDefaults() - - data, err := shared.GetJsonPatchData(opts.Ctx, inputResource) - if err != nil { - return nil, errors.Wrapf(err, "error getting status json patch data") - } - serviceObj, err := rc.kube.CoreV1().Services(namespace).Patch(opts.Ctx, name, types.JSONPatchType, data, metav1.PatchOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return nil, errors.NewNotExistErr(namespace, name, err) - } - return nil, errors.Wrapf(err, "patching serviceObj from kubernetes") - } - resource, err := FromKube(serviceObj) - if err != nil { - return nil, err - } - - if resource == nil { - return 
nil, errors.Errorf("serviceObj %v is not kind %v", name, rc.Kind()) - } - return resource, nil -} - -func (rc *ResourceClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - opts = opts.WithDefaults() - if !rc.exist(opts.Ctx, namespace, name) { - if !opts.IgnoreNotExist { - return errors.NewNotExistErr(namespace, name) - } - return nil - } - - if err := rc.kube.CoreV1().Services(namespace).Delete(opts.Ctx, name, metav1.DeleteOptions{}); err != nil { - return errors.Wrapf(err, "deleting svcObj %v", name) - } - return nil -} - -func (rc *ResourceClient) List(namespace string, opts clients.ListOpts) (resources.ResourceList, error) { - opts = opts.WithDefaults() - - svcObjList, err := rc.kube.CoreV1().Services(namespace).List(opts.Ctx, metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(opts.Selector).String(), - }) - if err != nil { - return nil, errors.Wrapf(err, "listing svcObjs in %v", namespace) - } - var resourceList resources.ResourceList - for _, svcObj := range svcObjList.Items { - resource, err := FromKube(&svcObj) - if err != nil { - return nil, err - } - if resource == nil { - continue - } - resourceList = append(resourceList, resource) - } - - sort.SliceStable(resourceList, func(i, j int) bool { - return resourceList[i].GetMetadata().GetName() < resourceList[j].GetMetadata().GetName() - }) - - return resourceList, nil -} - -func (rc *ResourceClient) Watch(namespace string, opts clients.WatchOpts) (<-chan resources.ResourceList, <-chan error, error) { - opts = opts.WithDefaults() - watch, err := rc.kube.CoreV1().Services(namespace).Watch(opts.Ctx, metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(opts.Selector).String(), - }) - if err != nil { - return nil, nil, errors.Wrapf(err, "initiating kube watch in %v", namespace) - } - resourcesChan := make(chan resources.ResourceList) - errs := make(chan error) - updateResourceList := func() { - list, err := rc.List(namespace, clients.ListOpts{ - Ctx: opts.Ctx, - Selector: opts.Selector, - }) - if err != nil { - errs <- err - return - } - resourcesChan <- list - } - - go func() { - // watch should open up with an initial read - updateResourceList() - for { - select { - case <-time.After(opts.RefreshRate): - updateResourceList() - case event := <-watch.ResultChan(): - switch event.Type { - case kubewatch.Error: - errs <- errors.Errorf("error during watch: %v", event) - default: - updateResourceList() - } - case <-opts.Ctx.Done(): - watch.Stop() - close(resourcesChan) - close(errs) - return - } - } - }() - - return resourcesChan, errs, nil -} - -func (rc *ResourceClient) exist(ctx context.Context, namespace, name string) bool { - _, err := rc.kube.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) - return err == nil -} diff --git a/projects/ingress/pkg/api/service/service_client.go b/projects/ingress/pkg/api/service/service_client.go deleted file mode 100644 index c2b102ac65d..00000000000 --- a/projects/ingress/pkg/api/service/service_client.go +++ /dev/null @@ -1,23 +0,0 @@ -package service - -import ( - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -// TODO (ilackarms): consider adding generators for these kind of clients to solo-kit - -type ClientWithSelector struct { - v1.KubeServiceClient - Selector map[string]string -} - -func NewClientWithSelector(kubeServiceClient v1.KubeServiceClient, selector map[string]string) v1.KubeServiceClient { - return &ClientWithSelector{KubeServiceClient: kubeServiceClient, Selector: 
selector} -} - -func (c *ClientWithSelector) Watch(namespace string, opts clients.WatchOpts) (<-chan v1.KubeServiceList, <-chan error, error) { - // override selector - opts.Selector = c.Selector - return c.KubeServiceClient.Watch(namespace, opts) -} diff --git a/projects/ingress/pkg/api/v1/ingress.pb.clone.go b/projects/ingress/pkg/api/v1/ingress.pb.clone.go deleted file mode 100644 index 7246ed6a11b..00000000000 --- a/projects/ingress/pkg/api/v1/ingress.pb.clone.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. -// source: github.com/solo-io/gloo/projects/ingress/api/v1/ingress.proto - -package v1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "strings" - - "github.com/solo-io/protoc-gen-ext/pkg/clone" - "google.golang.org/protobuf/proto" - - github_com_solo_io_solo_kit_pkg_api_v1_resources_core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - - google_golang_org_protobuf_types_known_anypb "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = bytes.Compare - _ = strings.Compare - _ = clone.Cloner(nil) - _ = proto.Message(nil) -) - -// Clone function -func (m *Ingress) Clone() proto.Message { - var target *Ingress - if m == nil { - return target - } - target = &Ingress{} - - if h, ok := interface{}(m.GetKubeIngressSpec()).(clone.Cloner); ok { - target.KubeIngressSpec = h.Clone().(*google_golang_org_protobuf_types_known_anypb.Any) - } else { - target.KubeIngressSpec = proto.Clone(m.GetKubeIngressSpec()).(*google_golang_org_protobuf_types_known_anypb.Any) - } - - if h, ok := interface{}(m.GetKubeIngressStatus()).(clone.Cloner); ok { - target.KubeIngressStatus = h.Clone().(*google_golang_org_protobuf_types_known_anypb.Any) - } else { - target.KubeIngressStatus = proto.Clone(m.GetKubeIngressStatus()).(*google_golang_org_protobuf_types_known_anypb.Any) - } - - if h, ok := interface{}(m.GetMetadata()).(clone.Cloner); ok { - target.Metadata = h.Clone().(*github_com_solo_io_solo_kit_pkg_api_v1_resources_core.Metadata) - } else { - target.Metadata = proto.Clone(m.GetMetadata()).(*github_com_solo_io_solo_kit_pkg_api_v1_resources_core.Metadata) - } - - return target -} diff --git a/projects/ingress/pkg/api/v1/ingress.pb.equal.go b/projects/ingress/pkg/api/v1/ingress.pb.equal.go deleted file mode 100644 index 06f60b1f9e4..00000000000 --- a/projects/ingress/pkg/api/v1/ingress.pb.equal.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. 
-// source: github.com/solo-io/gloo/projects/ingress/api/v1/ingress.proto - -package v1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - equality "github.com/solo-io/protoc-gen-ext/pkg/equality" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = bytes.Compare - _ = strings.Compare - _ = equality.Equalizer(nil) - _ = proto.Message(nil) -) - -// Equal function -func (m *Ingress) Equal(that interface{}) bool { - if that == nil { - return m == nil - } - - target, ok := that.(*Ingress) - if !ok { - that2, ok := that.(Ingress) - if ok { - target = &that2 - } else { - return false - } - } - if target == nil { - return m == nil - } else if m == nil { - return false - } - - if h, ok := interface{}(m.GetKubeIngressSpec()).(equality.Equalizer); ok { - if !h.Equal(target.GetKubeIngressSpec()) { - return false - } - } else { - if !proto.Equal(m.GetKubeIngressSpec(), target.GetKubeIngressSpec()) { - return false - } - } - - if h, ok := interface{}(m.GetKubeIngressStatus()).(equality.Equalizer); ok { - if !h.Equal(target.GetKubeIngressStatus()) { - return false - } - } else { - if !proto.Equal(m.GetKubeIngressStatus(), target.GetKubeIngressStatus()) { - return false - } - } - - if h, ok := interface{}(m.GetMetadata()).(equality.Equalizer); ok { - if !h.Equal(target.GetMetadata()) { - return false - } - } else { - if !proto.Equal(m.GetMetadata(), target.GetMetadata()) { - return false - } - } - - return true -} diff --git a/projects/ingress/pkg/api/v1/ingress.pb.go b/projects/ingress/pkg/api/v1/ingress.pb.go deleted file mode 100644 index 596bcc98977..00000000000 --- a/projects/ingress/pkg/api/v1/ingress.pb.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.6.1 -// source: github.com/solo-io/gloo/projects/ingress/api/v1/ingress.proto - -package v1 - -import ( - reflect "reflect" - sync "sync" - - _ "github.com/solo-io/protoc-gen-ext/extproto" - core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A simple wrapper for a Kubernetes Ingress Object. 
-type Ingress struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // a raw byte representation of the kubernetes ingress this resource wraps - KubeIngressSpec *anypb.Any `protobuf:"bytes,1,opt,name=kube_ingress_spec,json=kubeIngressSpec,proto3" json:"kube_ingress_spec,omitempty"` - // a raw byte representation of the ingress status of the kubernetes ingress object - KubeIngressStatus *anypb.Any `protobuf:"bytes,2,opt,name=kube_ingress_status,json=kubeIngressStatus,proto3" json:"kube_ingress_status,omitempty"` - // Metadata contains the object metadata for this resource - Metadata *core.Metadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *Ingress) Reset() { - *x = Ingress{} - mi := &file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Ingress) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Ingress) ProtoMessage() {} - -func (x *Ingress) ProtoReflect() protoreflect.Message { - mi := &file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Ingress.ProtoReflect.Descriptor instead. -func (*Ingress) Descriptor() ([]byte, []int) { - return file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescGZIP(), []int{0} -} - -func (x *Ingress) GetKubeIngressSpec() *anypb.Any { - if x != nil { - return x.KubeIngressSpec - } - return nil -} - -func (x *Ingress) GetKubeIngressStatus() *anypb.Any { - if x != nil { - return x.KubeIngressStatus - } - return nil -} - -func (x *Ingress) GetMetadata() *core.Metadata { - if x != nil { - return x.Metadata - } - return nil -} - -var File_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto protoreflect.FileDescriptor - -var file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDesc = []byte{ - 0x0a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, - 0x6f, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x6c, 0x6f, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x73, 0x6f, 0x6c, 0x6f, 0x2e, 0x69, 0x6f, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x69, 0x6f, 0x2f, - 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, - 0x69, 0x6f, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x6b, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x12, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x78, 0x74, 0x2e, - 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x07, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x40, 0x0a, 0x11, 0x6b, 0x75, 0x62, 0x65, 0x5f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0f, 0x6b, 0x75, 0x62, 0x65, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, - 0x70, 0x65, 0x63, 0x12, 0x4a, 0x0a, 0x13, 0x6b, 0x75, 0x62, 0x65, 0x5f, 0x69, 0x6e, 0x67, 0x72, - 0x65, 0x73, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x04, 0xb8, 0xf5, 0x04, 0x01, 0x52, 0x11, 0x6b, 0x75, - 0x62, 0x65, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x73, 0x6f, 0x6c, 0x6f, 0x2e, 0x69, 0x6f, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x3a, 0x13, 0x82, 0xf1, 0x04, 0x0f, 0x0a, 0x02, 0x69, 0x67, 0x12, 0x09, 0x69, - 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x42, 0x41, 0xb8, 0xf5, 0x04, 0x01, 0xc0, 0xf5, - 0x04, 0x01, 0xd0, 0xf5, 0x04, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x6c, 0x6f, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, - 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescOnce sync.Once - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescData = file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDesc -) - -func file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescGZIP() []byte { - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescOnce.Do(func() { - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescData) - }) - return file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDescData -} - -var file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_goTypes = []any{ - (*Ingress)(nil), // 0: ingress.solo.io.Ingress - (*anypb.Any)(nil), // 1: google.protobuf.Any - (*core.Metadata)(nil), // 2: core.solo.io.Metadata -} -var file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_depIdxs = []int32{ - 1, // 0: ingress.solo.io.Ingress.kube_ingress_spec:type_name -> google.protobuf.Any - 1, // 1: ingress.solo.io.Ingress.kube_ingress_status:type_name -> google.protobuf.Any - 2, // 2: ingress.solo.io.Ingress.metadata:type_name -> core.solo.io.Metadata - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list 
for field type_name -} - -func init() { file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_init() } -func file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_init() { - if File_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_goTypes, - DependencyIndexes: file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_depIdxs, - MessageInfos: file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_msgTypes, - }.Build() - File_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto = out.File - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_rawDesc = nil - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_goTypes = nil - file_github_com_solo_io_gloo_projects_ingress_api_v1_ingress_proto_depIdxs = nil -} diff --git a/projects/ingress/pkg/api/v1/ingress.pb.hash.go b/projects/ingress/pkg/api/v1/ingress.pb.hash.go deleted file mode 100644 index 9c90b935746..00000000000 --- a/projects/ingress/pkg/api/v1/ingress.pb.hash.go +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. -// source: github.com/solo-io/gloo/projects/ingress/api/v1/ingress.proto - -package v1 - -import ( - "encoding/binary" - "errors" - "fmt" - "hash" - "hash/fnv" - - safe_hasher "github.com/solo-io/protoc-gen-ext/pkg/hasher" - "github.com/solo-io/protoc-gen-ext/pkg/hasher/hashstructure" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = new(hash.Hash64) - _ = fnv.New64 - _ = hashstructure.Hash - _ = new(safe_hasher.SafeHasher) -) - -// Hash function -// -// Deprecated: due to hashing implemention only using field values. The omission -// of the field name in the hash calculation can lead to hash collisions. -// Prefer the HashUnique function instead. 
-func (m *Ingress) Hash(hasher hash.Hash64) (uint64, error) { - if m == nil { - return 0, nil - } - if hasher == nil { - hasher = fnv.New64() - } - var err error - if _, err = hasher.Write([]byte("ingress.solo.io.github.com/solo-io/gloo/projects/ingress/pkg/api/v1.Ingress")); err != nil { - return 0, err - } - - if h, ok := interface{}(m.GetKubeIngressSpec()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("KubeIngressSpec")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetKubeIngressSpec(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("KubeIngressSpec")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - if h, ok := interface{}(m.GetMetadata()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetMetadata(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - return hasher.Sum64(), nil -} diff --git a/projects/ingress/pkg/api/v1/ingress.pb.uniquehash.go b/projects/ingress/pkg/api/v1/ingress.pb.uniquehash.go deleted file mode 100644 index 48c8a277326..00000000000 --- a/projects/ingress/pkg/api/v1/ingress.pb.uniquehash.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. -// source: github.com/solo-io/gloo/projects/ingress/api/v1/ingress.proto - -package v1 - -import ( - "encoding/binary" - "errors" - "fmt" - "hash" - "hash/fnv" - "strconv" - - safe_hasher "github.com/solo-io/protoc-gen-ext/pkg/hasher" - "github.com/solo-io/protoc-gen-ext/pkg/hasher/hashstructure" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = new(hash.Hash64) - _ = fnv.New64 - _ = strconv.Itoa - _ = hashstructure.Hash - _ = new(safe_hasher.SafeHasher) -) - -// HashUnique function generates a hash of the object that is unique to the object by -// hashing field name and value pairs. -// Replaces Hash due to original hashing implemention only using field values. The omission -// of the field name in the hash calculation can lead to hash collisions. 
-func (m *Ingress) HashUnique(hasher hash.Hash64) (uint64, error) { - if m == nil { - return 0, nil - } - if hasher == nil { - hasher = fnv.New64() - } - var err error - if _, err = hasher.Write([]byte("ingress.solo.io.github.com/solo-io/gloo/projects/ingress/pkg/api/v1.Ingress")); err != nil { - return 0, err - } - - if h, ok := interface{}(m.GetKubeIngressSpec()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("KubeIngressSpec")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetKubeIngressSpec(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("KubeIngressSpec")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - if h, ok := interface{}(m.GetMetadata()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetMetadata(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - return hasher.Sum64(), nil -} diff --git a/projects/ingress/pkg/api/v1/ingress.sk.go b/projects/ingress/pkg/api/v1/ingress.sk.go deleted file mode 100644 index 55e4e9ed1d6..00000000000 --- a/projects/ingress/pkg/api/v1/ingress.sk.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "log" - "sort" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "github.com/solo-io/solo-kit/pkg/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // Compile-time assertion - _ resources.Resource = new(Ingress) -) - -func NewIngressHashableResource() resources.HashableResource { - return new(Ingress) -} - -func NewIngress(namespace, name string) *Ingress { - ingress := &Ingress{} - ingress.SetMetadata(&core.Metadata{ - Name: name, - Namespace: namespace, - }) - return ingress -} - -func (r *Ingress) SetMetadata(meta *core.Metadata) { - r.Metadata = meta -} - -func (r *Ingress) MustHash() uint64 { - hashVal, err := r.Hash(nil) - if err != nil { - log.Panicf("error while hashing: (%s) this should never happen", err) - } - return hashVal -} - -func (r *Ingress) GroupVersionKind() schema.GroupVersionKind { - return IngressGVK -} - -type IngressList []*Ingress - -func (list IngressList) Find(namespace, name string) (*Ingress, error) { - for _, ingress := range list { - if ingress.GetMetadata().Name == name && ingress.GetMetadata().Namespace == namespace { - return ingress, nil - } - } - return nil, errors.Errorf("list did not find ingress %v.%v", namespace, name) -} - -func (list IngressList) AsResources() resources.ResourceList { - var ress resources.ResourceList - for _, ingress := range list { - ress = append(ress, ingress) - } - return ress -} - -func (list IngressList) Names() []string { - var names []string - for _, ingress := range list { - names = append(names, ingress.GetMetadata().Name) - } - return names -} - -func (list IngressList) NamespacesDotNames() []string { - var names []string - for _, ingress := 
range list { - names = append(names, ingress.GetMetadata().Namespace+"."+ingress.GetMetadata().Name) - } - return names -} - -func (list IngressList) Sort() IngressList { - sort.SliceStable(list, func(i, j int) bool { - return list[i].GetMetadata().Less(list[j].GetMetadata()) - }) - return list -} - -func (list IngressList) Clone() IngressList { - var ingressList IngressList - for _, ingress := range list { - ingressList = append(ingressList, resources.Clone(ingress).(*Ingress)) - } - return ingressList -} - -func (list IngressList) Each(f func(element *Ingress)) { - for _, ingress := range list { - f(ingress) - } -} - -func (list IngressList) EachResource(f func(element resources.Resource)) { - for _, ingress := range list { - f(ingress) - } -} - -func (list IngressList) AsInterfaces() []interface{} { - var asInterfaces []interface{} - list.Each(func(element *Ingress) { - asInterfaces = append(asInterfaces, element) - }) - return asInterfaces -} - -// Kubernetes Adapter for Ingress - -func (o *Ingress) GetObjectKind() schema.ObjectKind { - t := IngressCrd.TypeMeta() - return &t -} - -func (o *Ingress) DeepCopyObject() runtime.Object { - return resources.Clone(o).(*Ingress) -} - -func (o *Ingress) DeepCopyInto(out *Ingress) { - clone := resources.Clone(o).(*Ingress) - *out = *clone -} - -var ( - IngressCrd = crd.NewCrd( - "ingresses", - IngressGVK.Group, - IngressGVK.Version, - IngressGVK.Kind, - "ig", - false, - &Ingress{}) -) - -var ( - IngressGVK = schema.GroupVersionKind{ - Version: "v1", - Group: "ingress.solo.io", - Kind: "Ingress", - } -) diff --git a/projects/ingress/pkg/api/v1/ingress_client.sk.go b/projects/ingress/pkg/api/v1/ingress_client.sk.go deleted file mode 100644 index ddfa7a7ad3a..00000000000 --- a/projects/ingress/pkg/api/v1/ingress_client.sk.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1 - -import ( - "context" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" -) - -type IngressWatcher interface { - // watch namespace-scoped Ingresses - Watch(namespace string, opts clients.WatchOpts) (<-chan IngressList, <-chan error, error) -} - -type IngressClient interface { - BaseClient() clients.ResourceClient - Register() error - Read(namespace, name string, opts clients.ReadOpts) (*Ingress, error) - Write(resource *Ingress, opts clients.WriteOpts) (*Ingress, error) - Delete(namespace, name string, opts clients.DeleteOpts) error - List(namespace string, opts clients.ListOpts) (IngressList, error) - IngressWatcher -} - -type ingressClient struct { - rc clients.ResourceClient -} - -func NewIngressClient(ctx context.Context, rcFactory factory.ResourceClientFactory) (IngressClient, error) { - return NewIngressClientWithToken(ctx, rcFactory, "") -} - -func NewIngressClientWithToken(ctx context.Context, rcFactory factory.ResourceClientFactory, token string) (IngressClient, error) { - rc, err := rcFactory.NewResourceClient(ctx, factory.NewResourceClientParams{ - ResourceType: &Ingress{}, - Token: token, - }) - if err != nil { - return nil, errors.Wrapf(err, "creating base Ingress resource client") - } - return NewIngressClientWithBase(rc), nil -} - -func NewIngressClientWithBase(rc clients.ResourceClient) IngressClient { - return &ingressClient{ - rc: rc, - } -} - -func (client *ingressClient) BaseClient() clients.ResourceClient { - return client.rc -} - -func (client *ingressClient) Register() error { - return client.rc.Register() -} - -func (client *ingressClient) Read(namespace, name string, opts clients.ReadOpts) (*Ingress, error) { - opts = opts.WithDefaults() - - resource, err := client.rc.Read(namespace, name, opts) - if err != nil { - return nil, err - } - return resource.(*Ingress), nil -} - -func (client *ingressClient) Write(ingress *Ingress, opts clients.WriteOpts) (*Ingress, error) { - opts = opts.WithDefaults() - resource, err := client.rc.Write(ingress, opts) - if err != nil { - return nil, err - } - return resource.(*Ingress), nil -} - -func (client *ingressClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - opts = opts.WithDefaults() - - return client.rc.Delete(namespace, name, opts) -} - -func (client *ingressClient) List(namespace string, opts clients.ListOpts) (IngressList, error) { - opts = opts.WithDefaults() - - resourceList, err := client.rc.List(namespace, opts) - if err != nil { - return nil, err - } - return convertToIngress(resourceList), nil -} - -func (client *ingressClient) Watch(namespace string, opts clients.WatchOpts) (<-chan IngressList, <-chan error, error) { - opts = opts.WithDefaults() - - resourcesChan, errs, initErr := client.rc.Watch(namespace, opts) - if initErr != nil { - return nil, nil, initErr - } - ingressesChan := make(chan IngressList) - go func() { - for { - select { - case resourceList := <-resourcesChan: - select { - case ingressesChan <- convertToIngress(resourceList): - case <-opts.Ctx.Done(): - close(ingressesChan) - return - } - case <-opts.Ctx.Done(): - close(ingressesChan) - return - } - } - }() - return ingressesChan, errs, nil -} - -func convertToIngress(resources resources.ResourceList) IngressList { - var ingressList IngressList - for _, resource := range resources { - ingressList = append(ingressList, resource.(*Ingress)) - } - return 
ingressList -} diff --git a/projects/ingress/pkg/api/v1/ingress_reconciler.sk.go b/projects/ingress/pkg/api/v1/ingress_reconciler.sk.go deleted file mode 100644 index d4b5967e045..00000000000 --- a/projects/ingress/pkg/api/v1/ingress_reconciler.sk.go +++ /dev/null @@ -1,47 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/reconcile" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" -) - -// Option to copy anything from the original to the desired before writing. Return value of false means don't update -type TransitionIngressFunc func(original, desired *Ingress) (bool, error) - -type IngressReconciler interface { - Reconcile(namespace string, desiredResources IngressList, transition TransitionIngressFunc, opts clients.ListOpts) error -} - -func ingresssToResources(list IngressList) resources.ResourceList { - var resourceList resources.ResourceList - for _, ingress := range list { - resourceList = append(resourceList, ingress) - } - return resourceList -} - -func NewIngressReconciler(client IngressClient, statusSetter resources.StatusSetter) IngressReconciler { - return &ingressReconciler{ - base: reconcile.NewReconciler(client.BaseClient(), statusSetter), - } -} - -type ingressReconciler struct { - base reconcile.Reconciler -} - -func (r *ingressReconciler) Reconcile(namespace string, desiredResources IngressList, transition TransitionIngressFunc, opts clients.ListOpts) error { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "ingress_reconciler") - var transitionResources reconcile.TransitionResourcesFunc - if transition != nil { - transitionResources = func(original, desired resources.Resource) (bool, error) { - return transition(original.(*Ingress), desired.(*Ingress)) - } - } - return r.base.Reconcile(namespace, ingresssToResources(desiredResources), transitionResources, opts) -} diff --git a/projects/ingress/pkg/api/v1/kube_service.sk.go b/projects/ingress/pkg/api/v1/kube_service.sk.go deleted file mode 100644 index e7f03c28377..00000000000 --- a/projects/ingress/pkg/api/v1/kube_service.sk.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1 - -import ( - "log" - "sort" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "github.com/solo-io/solo-kit/pkg/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // Compile-time assertion - _ resources.Resource = new(KubeService) -) - -func NewKubeServiceHashableResource() resources.HashableResource { - return new(KubeService) -} - -func NewKubeService(namespace, name string) *KubeService { - kubeservice := &KubeService{} - kubeservice.SetMetadata(&core.Metadata{ - Name: name, - Namespace: namespace, - }) - return kubeservice -} - -func (r *KubeService) SetMetadata(meta *core.Metadata) { - r.Metadata = meta -} - -func (r *KubeService) MustHash() uint64 { - hashVal, err := r.Hash(nil) - if err != nil { - log.Panicf("error while hashing: (%s) this should never happen", err) - } - return hashVal -} - -func (r *KubeService) GroupVersionKind() schema.GroupVersionKind { - return KubeServiceGVK -} - -type KubeServiceList []*KubeService - -func (list KubeServiceList) Find(namespace, name string) (*KubeService, error) { - for _, kubeService := range list { - if kubeService.GetMetadata().Name == name && kubeService.GetMetadata().Namespace == namespace { - return kubeService, nil - } - } - return nil, errors.Errorf("list did not find kubeService %v.%v", namespace, name) -} - -func (list KubeServiceList) AsResources() resources.ResourceList { - var ress resources.ResourceList - for _, kubeService := range list { - ress = append(ress, kubeService) - } - return ress -} - -func (list KubeServiceList) Names() []string { - var names []string - for _, kubeService := range list { - names = append(names, kubeService.GetMetadata().Name) - } - return names -} - -func (list KubeServiceList) NamespacesDotNames() []string { - var names []string - for _, kubeService := range list { - names = append(names, kubeService.GetMetadata().Namespace+"."+kubeService.GetMetadata().Name) - } - return names -} - -func (list KubeServiceList) Sort() KubeServiceList { - sort.SliceStable(list, func(i, j int) bool { - return list[i].GetMetadata().Less(list[j].GetMetadata()) - }) - return list -} - -func (list KubeServiceList) Clone() KubeServiceList { - var kubeServiceList KubeServiceList - for _, kubeService := range list { - kubeServiceList = append(kubeServiceList, resources.Clone(kubeService).(*KubeService)) - } - return kubeServiceList -} - -func (list KubeServiceList) Each(f func(element *KubeService)) { - for _, kubeService := range list { - f(kubeService) - } -} - -func (list KubeServiceList) EachResource(f func(element resources.Resource)) { - for _, kubeService := range list { - f(kubeService) - } -} - -func (list KubeServiceList) AsInterfaces() []interface{} { - var asInterfaces []interface{} - list.Each(func(element *KubeService) { - asInterfaces = append(asInterfaces, element) - }) - return asInterfaces -} - -// Kubernetes Adapter for KubeService - -func (o *KubeService) GetObjectKind() schema.ObjectKind { - t := KubeServiceCrd.TypeMeta() - return &t -} - -func (o *KubeService) DeepCopyObject() runtime.Object { - return resources.Clone(o).(*KubeService) -} - -func (o *KubeService) DeepCopyInto(out *KubeService) { - clone := resources.Clone(o).(*KubeService) - *out = *clone -} - -var ( - KubeServiceCrd = crd.NewCrd( - "services", - KubeServiceGVK.Group, - KubeServiceGVK.Version, - KubeServiceGVK.Kind, - "sv", - false, - 
&KubeService{}) -) - -var ( - KubeServiceGVK = schema.GroupVersionKind{ - Version: "v1", - Group: "ingress.solo.io", - Kind: "KubeService", - } -) diff --git a/projects/ingress/pkg/api/v1/kube_service_client.sk.go b/projects/ingress/pkg/api/v1/kube_service_client.sk.go deleted file mode 100644 index aa3b75c89e7..00000000000 --- a/projects/ingress/pkg/api/v1/kube_service_client.sk.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" -) - -type KubeServiceWatcher interface { - // watch namespace-scoped Services - Watch(namespace string, opts clients.WatchOpts) (<-chan KubeServiceList, <-chan error, error) -} - -type KubeServiceClient interface { - BaseClient() clients.ResourceClient - Register() error - Read(namespace, name string, opts clients.ReadOpts) (*KubeService, error) - Write(resource *KubeService, opts clients.WriteOpts) (*KubeService, error) - Delete(namespace, name string, opts clients.DeleteOpts) error - List(namespace string, opts clients.ListOpts) (KubeServiceList, error) - KubeServiceWatcher -} - -type kubeServiceClient struct { - rc clients.ResourceClient -} - -func NewKubeServiceClient(ctx context.Context, rcFactory factory.ResourceClientFactory) (KubeServiceClient, error) { - return NewKubeServiceClientWithToken(ctx, rcFactory, "") -} - -func NewKubeServiceClientWithToken(ctx context.Context, rcFactory factory.ResourceClientFactory, token string) (KubeServiceClient, error) { - rc, err := rcFactory.NewResourceClient(ctx, factory.NewResourceClientParams{ - ResourceType: &KubeService{}, - Token: token, - }) - if err != nil { - return nil, errors.Wrapf(err, "creating base KubeService resource client") - } - return NewKubeServiceClientWithBase(rc), nil -} - -func NewKubeServiceClientWithBase(rc clients.ResourceClient) KubeServiceClient { - return &kubeServiceClient{ - rc: rc, - } -} - -func (client *kubeServiceClient) BaseClient() clients.ResourceClient { - return client.rc -} - -func (client *kubeServiceClient) Register() error { - return client.rc.Register() -} - -func (client *kubeServiceClient) Read(namespace, name string, opts clients.ReadOpts) (*KubeService, error) { - opts = opts.WithDefaults() - - resource, err := client.rc.Read(namespace, name, opts) - if err != nil { - return nil, err - } - return resource.(*KubeService), nil -} - -func (client *kubeServiceClient) Write(kubeService *KubeService, opts clients.WriteOpts) (*KubeService, error) { - opts = opts.WithDefaults() - resource, err := client.rc.Write(kubeService, opts) - if err != nil { - return nil, err - } - return resource.(*KubeService), nil -} - -func (client *kubeServiceClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - opts = opts.WithDefaults() - - return client.rc.Delete(namespace, name, opts) -} - -func (client *kubeServiceClient) List(namespace string, opts clients.ListOpts) (KubeServiceList, error) { - opts = opts.WithDefaults() - - resourceList, err := client.rc.List(namespace, opts) - if err != nil { - return nil, err - } - return convertToKubeService(resourceList), nil -} - -func (client *kubeServiceClient) Watch(namespace string, opts clients.WatchOpts) (<-chan KubeServiceList, <-chan error, error) { - opts = opts.WithDefaults() - - resourcesChan, errs, initErr := client.rc.Watch(namespace, opts) - if 
initErr != nil { - return nil, nil, initErr - } - servicesChan := make(chan KubeServiceList) - go func() { - for { - select { - case resourceList := <-resourcesChan: - select { - case servicesChan <- convertToKubeService(resourceList): - case <-opts.Ctx.Done(): - close(servicesChan) - return - } - case <-opts.Ctx.Done(): - close(servicesChan) - return - } - } - }() - return servicesChan, errs, nil -} - -func convertToKubeService(resources resources.ResourceList) KubeServiceList { - var kubeServiceList KubeServiceList - for _, resource := range resources { - kubeServiceList = append(kubeServiceList, resource.(*KubeService)) - } - return kubeServiceList -} diff --git a/projects/ingress/pkg/api/v1/kube_service_reconciler.sk.go b/projects/ingress/pkg/api/v1/kube_service_reconciler.sk.go deleted file mode 100644 index 4f022b3ea9f..00000000000 --- a/projects/ingress/pkg/api/v1/kube_service_reconciler.sk.go +++ /dev/null @@ -1,47 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/reconcile" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" -) - -// Option to copy anything from the original to the desired before writing. Return value of false means don't update -type TransitionKubeServiceFunc func(original, desired *KubeService) (bool, error) - -type KubeServiceReconciler interface { - Reconcile(namespace string, desiredResources KubeServiceList, transition TransitionKubeServiceFunc, opts clients.ListOpts) error -} - -func kubeServicesToResources(list KubeServiceList) resources.ResourceList { - var resourceList resources.ResourceList - for _, kubeService := range list { - resourceList = append(resourceList, kubeService) - } - return resourceList -} - -func NewKubeServiceReconciler(client KubeServiceClient, statusSetter resources.StatusSetter) KubeServiceReconciler { - return &kubeServiceReconciler{ - base: reconcile.NewReconciler(client.BaseClient(), statusSetter), - } -} - -type kubeServiceReconciler struct { - base reconcile.Reconciler -} - -func (r *kubeServiceReconciler) Reconcile(namespace string, desiredResources KubeServiceList, transition TransitionKubeServiceFunc, opts clients.ListOpts) error { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "kubeService_reconciler") - var transitionResources reconcile.TransitionResourcesFunc - if transition != nil { - transitionResources = func(original, desired resources.Resource) (bool, error) { - return transition(original.(*KubeService), desired.(*KubeService)) - } - } - return r.base.Reconcile(namespace, kubeServicesToResources(desiredResources), transitionResources, opts) -} diff --git a/projects/ingress/pkg/api/v1/service.pb.clone.go b/projects/ingress/pkg/api/v1/service.pb.clone.go deleted file mode 100644 index e6734c41e04..00000000000 --- a/projects/ingress/pkg/api/v1/service.pb.clone.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. 
-// source: github.com/solo-io/gloo/projects/ingress/api/v1/service.proto - -package v1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "strings" - - "github.com/solo-io/protoc-gen-ext/pkg/clone" - "google.golang.org/protobuf/proto" - - github_com_solo_io_solo_kit_pkg_api_v1_resources_core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - - google_golang_org_protobuf_types_known_anypb "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = bytes.Compare - _ = strings.Compare - _ = clone.Cloner(nil) - _ = proto.Message(nil) -) - -// Clone function -func (m *KubeService) Clone() proto.Message { - var target *KubeService - if m == nil { - return target - } - target = &KubeService{} - - if h, ok := interface{}(m.GetKubeServiceSpec()).(clone.Cloner); ok { - target.KubeServiceSpec = h.Clone().(*google_golang_org_protobuf_types_known_anypb.Any) - } else { - target.KubeServiceSpec = proto.Clone(m.GetKubeServiceSpec()).(*google_golang_org_protobuf_types_known_anypb.Any) - } - - if h, ok := interface{}(m.GetKubeServiceStatus()).(clone.Cloner); ok { - target.KubeServiceStatus = h.Clone().(*google_golang_org_protobuf_types_known_anypb.Any) - } else { - target.KubeServiceStatus = proto.Clone(m.GetKubeServiceStatus()).(*google_golang_org_protobuf_types_known_anypb.Any) - } - - if h, ok := interface{}(m.GetMetadata()).(clone.Cloner); ok { - target.Metadata = h.Clone().(*github_com_solo_io_solo_kit_pkg_api_v1_resources_core.Metadata) - } else { - target.Metadata = proto.Clone(m.GetMetadata()).(*github_com_solo_io_solo_kit_pkg_api_v1_resources_core.Metadata) - } - - return target -} diff --git a/projects/ingress/pkg/api/v1/service.pb.equal.go b/projects/ingress/pkg/api/v1/service.pb.equal.go deleted file mode 100644 index 784c4867f13..00000000000 --- a/projects/ingress/pkg/api/v1/service.pb.equal.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. 
-// source: github.com/solo-io/gloo/projects/ingress/api/v1/service.proto - -package v1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - equality "github.com/solo-io/protoc-gen-ext/pkg/equality" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = bytes.Compare - _ = strings.Compare - _ = equality.Equalizer(nil) - _ = proto.Message(nil) -) - -// Equal function -func (m *KubeService) Equal(that interface{}) bool { - if that == nil { - return m == nil - } - - target, ok := that.(*KubeService) - if !ok { - that2, ok := that.(KubeService) - if ok { - target = &that2 - } else { - return false - } - } - if target == nil { - return m == nil - } else if m == nil { - return false - } - - if h, ok := interface{}(m.GetKubeServiceSpec()).(equality.Equalizer); ok { - if !h.Equal(target.GetKubeServiceSpec()) { - return false - } - } else { - if !proto.Equal(m.GetKubeServiceSpec(), target.GetKubeServiceSpec()) { - return false - } - } - - if h, ok := interface{}(m.GetKubeServiceStatus()).(equality.Equalizer); ok { - if !h.Equal(target.GetKubeServiceStatus()) { - return false - } - } else { - if !proto.Equal(m.GetKubeServiceStatus(), target.GetKubeServiceStatus()) { - return false - } - } - - if h, ok := interface{}(m.GetMetadata()).(equality.Equalizer); ok { - if !h.Equal(target.GetMetadata()) { - return false - } - } else { - if !proto.Equal(m.GetMetadata(), target.GetMetadata()) { - return false - } - } - - return true -} diff --git a/projects/ingress/pkg/api/v1/service.pb.go b/projects/ingress/pkg/api/v1/service.pb.go deleted file mode 100644 index c4d809ba9d8..00000000000 --- a/projects/ingress/pkg/api/v1/service.pb.go +++ /dev/null @@ -1,182 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.6.1 -// source: github.com/solo-io/gloo/projects/ingress/api/v1/service.proto - -package v1 - -import ( - reflect "reflect" - sync "sync" - - _ "github.com/solo-io/protoc-gen-ext/extproto" - core "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A simple wrapper for a Kubernetes Service Object. 
-type KubeService struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // a raw byte representation of the kubernetes service this resource wraps - KubeServiceSpec *anypb.Any `protobuf:"bytes,1,opt,name=kube_service_spec,json=kubeServiceSpec,proto3" json:"kube_service_spec,omitempty"` - // a raw byte representation of the service status of the kubernetes service object - KubeServiceStatus *anypb.Any `protobuf:"bytes,2,opt,name=kube_service_status,json=kubeServiceStatus,proto3" json:"kube_service_status,omitempty"` - // Metadata contains the object metadata for this resource - Metadata *core.Metadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *KubeService) Reset() { - *x = KubeService{} - mi := &file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *KubeService) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KubeService) ProtoMessage() {} - -func (x *KubeService) ProtoReflect() protoreflect.Message { - mi := &file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KubeService.ProtoReflect.Descriptor instead. -func (*KubeService) Descriptor() ([]byte, []int) { - return file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescGZIP(), []int{0} -} - -func (x *KubeService) GetKubeServiceSpec() *anypb.Any { - if x != nil { - return x.KubeServiceSpec - } - return nil -} - -func (x *KubeService) GetKubeServiceStatus() *anypb.Any { - if x != nil { - return x.KubeServiceStatus - } - return nil -} - -func (x *KubeService) GetMetadata() *core.Metadata { - if x != nil { - return x.Metadata - } - return nil -} - -var File_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto protoreflect.FileDescriptor - -var file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDesc = []byte{ - 0x0a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, - 0x6f, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x6c, 0x6f, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x73, 0x6f, 0x6c, 0x6f, 0x2e, 0x69, 0x6f, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x69, 0x6f, 0x2f, - 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, - 0x69, 0x6f, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x6b, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x12, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2f, 0x65, 0x78, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdd, 0x01, 0x0a, 0x0b, 0x4b, 0x75, 0x62, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x11, 0x6b, 0x75, 0x62, 0x65, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x6b, 0x75, 0x62, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x44, 0x0a, 0x13, 0x6b, 0x75, 0x62, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x11, 0x6b, 0x75, 0x62, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x73, 0x6f, 0x6c, 0x6f, 0x2e, 0x69, 0x6f, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x3a, 0x12, 0x82, 0xf1, 0x04, 0x0e, 0x0a, 0x02, 0x73, 0x76, 0x12, 0x08, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x41, 0xb8, 0xf5, 0x04, 0x01, 0xc0, 0xf5, 0x04, 0x01, 0xd0, - 0xf5, 0x04, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x73, 0x6f, 0x6c, 0x6f, 0x2d, 0x69, 0x6f, 0x2f, 0x67, 0x6c, 0x6f, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2f, 0x70, 0x6b, - 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescOnce sync.Once - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescData = file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDesc -) - -func file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescGZIP() []byte { - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescOnce.Do(func() { - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescData) - }) - return file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDescData -} - -var file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_goTypes = []any{ - (*KubeService)(nil), // 0: ingress.solo.io.KubeService - (*anypb.Any)(nil), // 1: google.protobuf.Any - (*core.Metadata)(nil), // 2: core.solo.io.Metadata -} -var file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_depIdxs = []int32{ - 1, // 0: ingress.solo.io.KubeService.kube_service_spec:type_name -> google.protobuf.Any - 1, // 1: ingress.solo.io.KubeService.kube_service_status:type_name -> google.protobuf.Any - 2, // 2: ingress.solo.io.KubeService.metadata:type_name -> core.solo.io.Metadata - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for 
extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_init() } -func file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_init() { - if File_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_goTypes, - DependencyIndexes: file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_depIdxs, - MessageInfos: file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_msgTypes, - }.Build() - File_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto = out.File - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_rawDesc = nil - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_goTypes = nil - file_github_com_solo_io_gloo_projects_ingress_api_v1_service_proto_depIdxs = nil -} diff --git a/projects/ingress/pkg/api/v1/service.pb.hash.go b/projects/ingress/pkg/api/v1/service.pb.hash.go deleted file mode 100644 index cb2eddec1db..00000000000 --- a/projects/ingress/pkg/api/v1/service.pb.hash.go +++ /dev/null @@ -1,106 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. -// source: github.com/solo-io/gloo/projects/ingress/api/v1/service.proto - -package v1 - -import ( - "encoding/binary" - "errors" - "fmt" - "hash" - "hash/fnv" - - safe_hasher "github.com/solo-io/protoc-gen-ext/pkg/hasher" - "github.com/solo-io/protoc-gen-ext/pkg/hasher/hashstructure" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = new(hash.Hash64) - _ = fnv.New64 - _ = hashstructure.Hash - _ = new(safe_hasher.SafeHasher) -) - -// Hash function -// -// Deprecated: due to hashing implemention only using field values. The omission -// of the field name in the hash calculation can lead to hash collisions. -// Prefer the HashUnique function instead. 
-func (m *KubeService) Hash(hasher hash.Hash64) (uint64, error) { - if m == nil { - return 0, nil - } - if hasher == nil { - hasher = fnv.New64() - } - var err error - if _, err = hasher.Write([]byte("ingress.solo.io.github.com/solo-io/gloo/projects/ingress/pkg/api/v1.KubeService")); err != nil { - return 0, err - } - - if h, ok := interface{}(m.GetKubeServiceSpec()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("KubeServiceSpec")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetKubeServiceSpec(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("KubeServiceSpec")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - if h, ok := interface{}(m.GetKubeServiceStatus()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("KubeServiceStatus")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetKubeServiceStatus(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("KubeServiceStatus")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - if h, ok := interface{}(m.GetMetadata()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetMetadata(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - return hasher.Sum64(), nil -} diff --git a/projects/ingress/pkg/api/v1/service.pb.uniquehash.go b/projects/ingress/pkg/api/v1/service.pb.uniquehash.go deleted file mode 100644 index 41798a1bcc2..00000000000 --- a/projects/ingress/pkg/api/v1/service.pb.uniquehash.go +++ /dev/null @@ -1,107 +0,0 @@ -// Code generated by protoc-gen-ext. DO NOT EDIT. -// source: github.com/solo-io/gloo/projects/ingress/api/v1/service.proto - -package v1 - -import ( - "encoding/binary" - "errors" - "fmt" - "hash" - "hash/fnv" - "strconv" - - safe_hasher "github.com/solo-io/protoc-gen-ext/pkg/hasher" - "github.com/solo-io/protoc-gen-ext/pkg/hasher/hashstructure" -) - -// ensure the imports are used -var ( - _ = errors.New("") - _ = fmt.Print - _ = binary.LittleEndian - _ = new(hash.Hash64) - _ = fnv.New64 - _ = strconv.Itoa - _ = hashstructure.Hash - _ = new(safe_hasher.SafeHasher) -) - -// HashUnique function generates a hash of the object that is unique to the object by -// hashing field name and value pairs. -// Replaces Hash due to original hashing implemention only using field values. The omission -// of the field name in the hash calculation can lead to hash collisions. 
-func (m *KubeService) HashUnique(hasher hash.Hash64) (uint64, error) { - if m == nil { - return 0, nil - } - if hasher == nil { - hasher = fnv.New64() - } - var err error - if _, err = hasher.Write([]byte("ingress.solo.io.github.com/solo-io/gloo/projects/ingress/pkg/api/v1.KubeService")); err != nil { - return 0, err - } - - if h, ok := interface{}(m.GetKubeServiceSpec()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("KubeServiceSpec")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetKubeServiceSpec(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("KubeServiceSpec")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - if h, ok := interface{}(m.GetKubeServiceStatus()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("KubeServiceStatus")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetKubeServiceStatus(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("KubeServiceStatus")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - if h, ok := interface{}(m.GetMetadata()).(safe_hasher.SafeHasher); ok { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if _, err = h.Hash(hasher); err != nil { - return 0, err - } - } else { - if fieldValue, err := hashstructure.Hash(m.GetMetadata(), nil); err != nil { - return 0, err - } else { - if _, err = hasher.Write([]byte("Metadata")); err != nil { - return 0, err - } - if err := binary.Write(hasher, binary.LittleEndian, fieldValue); err != nil { - return 0, err - } - } - } - - return hasher.Sum64(), nil -} diff --git a/projects/ingress/pkg/api/v1/status_event_loop.sk.go b/projects/ingress/pkg/api/v1/status_event_loop.sk.go deleted file mode 100644 index d2f5c445ebc..00000000000 --- a/projects/ingress/pkg/api/v1/status_event_loop.sk.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1 - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -var ( - mStatusSnapshotTimeSec = stats.Float64("status.ingress.solo.io/sync/time_sec", "The time taken for a given sync", "1") - mStatusSnapshotTimeSecView = &view.View{ - Name: "status.ingress.solo.io/sync/time_sec", - Description: "The time taken for a given sync", - TagKeys: []tag.Key{tag.MustNewKey("syncer_name")}, - Measure: mStatusSnapshotTimeSec, - Aggregation: view.Distribution(0.01, 0.05, 0.1, 0.25, 0.5, 1, 5, 10, 60), - } -) - -func init() { - view.Register( - mStatusSnapshotTimeSecView, - ) -} - -type StatusSyncer interface { - Sync(context.Context, *StatusSnapshot) error -} - -type StatusSyncers []StatusSyncer - -func (s StatusSyncers) Sync(ctx context.Context, snapshot *StatusSnapshot) error { - var multiErr *multierror.Error - for _, syncer := range s { - if err := syncer.Sync(ctx, snapshot); err != nil { - multiErr = multierror.Append(multiErr, err) - } - } - return multiErr.ErrorOrNil() -} - -type statusEventLoop struct { - emitter StatusSnapshotEmitter - syncer StatusSyncer - ready chan struct{} -} - -func NewStatusEventLoop(emitter StatusSnapshotEmitter, syncer StatusSyncer) eventloop.EventLoop { - return &statusEventLoop{ - emitter: emitter, - syncer: syncer, - ready: make(chan struct{}), - } -} - -func (el *statusEventLoop) Ready() <-chan struct{} { - return el.ready -} - -func (el *statusEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(opts.Ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - var channelClosed bool - - // create a new context for each loop, cancel it before each loop - var cancel context.CancelFunc = func() {} - - // use closure to allow cancel function to be updated as context changes - defer func() { cancel() }() - - // cache the previous snapshot for comparison - var previousSnapshot *StatusSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - if syncDecider, isDecider := el.syncer.(StatusSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := el.syncer.(StatusSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(opts.Ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // cancel any open watches from previous loop - cancel() - - startTime := time.Now() - ctx, span := trace.StartSpan(opts.Ctx, "status.ingress.solo.io.EventLoopSync") - ctx, canc := context.WithCancel(ctx) - cancel = canc - err := el.syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - 
tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", el.syncer)), - }, - mStatusSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } else if !channelClosed { - channelClosed = true - close(el.ready) - } - - previousSnapshot = snapshot - - case <-opts.Ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/ingress/pkg/api/v1/status_simple_event_loop.sk.go b/projects/ingress/pkg/api/v1/status_simple_event_loop.sk.go deleted file mode 100644 index 1614026b204..00000000000 --- a/projects/ingress/pkg/api/v1/status_simple_event_loop.sk.go +++ /dev/null @@ -1,134 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -// SyncDeciders Syncer which implements this interface -// can make smarter decisions over whether -// it should be restarted (including having its context cancelled) -// based on a diff of the previous and current snapshot - -// Deprecated: use StatusSyncDeciderWithContext -type StatusSyncDecider interface { - StatusSyncer - ShouldSync(old, new *StatusSnapshot) bool -} - -type StatusSyncDeciderWithContext interface { - StatusSyncer - ShouldSync(ctx context.Context, old, new *StatusSnapshot) bool -} - -type statusSimpleEventLoop struct { - emitter StatusSimpleEmitter - syncers []StatusSyncer -} - -func NewStatusSimpleEventLoop(emitter StatusSimpleEmitter, syncers ...StatusSyncer) eventloop.SimpleEventLoop { - return &statusSimpleEventLoop{ - emitter: emitter, - syncers: syncers, - } -} - -func (el *statusSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(ctx) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[StatusSyncer]context.CancelFunc) - - // use closure to allow cancel function to be updated as context changes - defer func() { - for _, cancel := range syncerCancels { - cancel() - } - }() - - // cache the previous snapshot for comparison - var previousSnapshot *StatusSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - // cancel any open watches from previous loop - for _, syncer := range el.syncers { - // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(StatusSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := syncer.(StatusSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // if 
this syncer had a previous context, cancel it - cancel, ok := syncerCancels[syncer] - if ok { - cancel() - } - - startTime := time.Now() - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("status.ingress.solo.io.SimpleEventLoopSync-%T", syncer)) - ctx, canc := context.WithCancel(ctx) - err := syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", syncer)), - }, - mStatusSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } - - syncerCancels[syncer] = canc - } - - previousSnapshot = snapshot - - case <-ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/ingress/pkg/api/v1/status_snapshot.sk.go b/projects/ingress/pkg/api/v1/status_snapshot.sk.go deleted file mode 100644 index d180a4650cb..00000000000 --- a/projects/ingress/pkg/api/v1/status_snapshot.sk.go +++ /dev/null @@ -1,197 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "fmt" - "hash" - "hash/fnv" - "log" - - "github.com/rotisserie/eris" - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "go.uber.org/zap" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type StatusSnapshot struct { - Services KubeServiceList - Ingresses IngressList -} - -func (s StatusSnapshot) Clone() StatusSnapshot { - return StatusSnapshot{ - Services: s.Services.Clone(), - Ingresses: s.Ingresses.Clone(), - } -} - -func (s StatusSnapshot) Hash(hasher hash.Hash64) (uint64, error) { - if hasher == nil { - hasher = fnv.New64() - } - if _, err := s.hashServices(hasher); err != nil { - return 0, err - } - if _, err := s.hashIngresses(hasher); err != nil { - return 0, err - } - return hasher.Sum64(), nil -} - -func (s StatusSnapshot) hashServices(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Services.AsInterfaces()...) -} - -func (s StatusSnapshot) hashIngresses(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Ingresses.AsInterfaces()...) 
-} - -func (s StatusSnapshot) HashFields() []zap.Field { - var fields []zap.Field - hasher := fnv.New64() - ServicesHash, err := s.hashServices(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("services", ServicesHash)) - IngressesHash, err := s.hashIngresses(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("ingresses", IngressesHash)) - snapshotHash, err := s.Hash(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return append(fields, zap.Uint64("snapshotHash", snapshotHash)) -} - -func (s *StatusSnapshot) GetResourcesList(resource resources.Resource) (resources.ResourceList, error) { - switch resource.(type) { - case *KubeService: - return s.Services.AsResources(), nil - case *Ingress: - return s.Ingresses.AsResources(), nil - default: - return resources.ResourceList{}, eris.New("did not contain the input resource type returning empty list") - } -} - -func (s *StatusSnapshot) RemoveFromResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch resource.(type) { - case *KubeService: - - for i, res := range s.Services { - if refKey == res.GetMetadata().Ref().Key() { - s.Services = append(s.Services[:i], s.Services[i+1:]...) - break - } - } - return nil - case *Ingress: - - for i, res := range s.Ingresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Ingresses = append(s.Ingresses[:i], s.Ingresses[i+1:]...) - break - } - } - return nil - default: - return eris.Errorf("did not remove the resource because its type does not exist [%T]", resource) - } -} - -func (s *StatusSnapshot) RemoveMatches(predicate core.Predicate) { - var Services KubeServiceList - for _, res := range s.Services { - if matches := predicate(res.GetMetadata()); !matches { - Services = append(Services, res) - } - } - s.Services = Services - var Ingresses IngressList - for _, res := range s.Ingresses { - if matches := predicate(res.GetMetadata()); !matches { - Ingresses = append(Ingresses, res) - } - } - s.Ingresses = Ingresses -} - -func (s *StatusSnapshot) UpsertToResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch typed := resource.(type) { - case *KubeService: - updated := false - for i, res := range s.Services { - if refKey == res.GetMetadata().Ref().Key() { - s.Services[i] = typed - updated = true - } - } - if !updated { - s.Services = append(s.Services, typed) - } - s.Services.Sort() - return nil - case *Ingress: - updated := false - for i, res := range s.Ingresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Ingresses[i] = typed - updated = true - } - } - if !updated { - s.Ingresses = append(s.Ingresses, typed) - } - s.Ingresses.Sort() - return nil - default: - return eris.Errorf("did not add/replace the resource type because it does not exist %T", resource) - } -} - -type StatusSnapshotStringer struct { - Version uint64 - Services []string - Ingresses []string -} - -func (ss StatusSnapshotStringer) String() string { - s := fmt.Sprintf("StatusSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Services %v\n", len(ss.Services)) - for _, name := range ss.Services { - s += fmt.Sprintf(" %v\n", name) - } - - s += fmt.Sprintf(" Ingresses %v\n", len(ss.Ingresses)) - for _, name := range ss.Ingresses { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s 
StatusSnapshot) Stringer() StatusSnapshotStringer { - snapshotHash, err := s.Hash(nil) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return StatusSnapshotStringer{ - Version: snapshotHash, - Services: s.Services.NamespacesDotNames(), - Ingresses: s.Ingresses.NamespacesDotNames(), - } -} - -var StatusGvkToHashableResource = map[schema.GroupVersionKind]func() resources.HashableResource{ - KubeServiceGVK: NewKubeServiceHashableResource, - IngressGVK: NewIngressHashableResource, -} diff --git a/projects/ingress/pkg/api/v1/status_snapshot_emitter.sk.go b/projects/ingress/pkg/api/v1/status_snapshot_emitter.sk.go deleted file mode 100644 index cf6ccfb6671..00000000000 --- a/projects/ingress/pkg/api/v1/status_snapshot_emitter.sk.go +++ /dev/null @@ -1,330 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" -) - -var ( - // Deprecated. See mStatusResourcesIn - mStatusSnapshotIn = stats.Int64("status.ingress.solo.io/emitter/snap_in", "Deprecated. Use status.ingress.solo.io/emitter/resources_in. The number of snapshots in", "1") - - // metrics for emitter - mStatusResourcesIn = stats.Int64("status.ingress.solo.io/emitter/resources_in", "The number of resource lists received on open watch channels", "1") - mStatusSnapshotOut = stats.Int64("status.ingress.solo.io/emitter/snap_out", "The number of snapshots out", "1") - mStatusSnapshotMissed = stats.Int64("status.ingress.solo.io/emitter/snap_missed", "The number of snapshots missed", "1") - - // views for emitter - // deprecated: see statusResourcesInView - statussnapshotInView = &view.View{ - Name: "status.ingress.solo.io/emitter/snap_in", - Measure: mStatusSnapshotIn, - Description: "Deprecated. Use status.ingress.solo.io/emitter/resources_in. The number of snapshots updates coming in.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - - statusResourcesInView = &view.View{ - Name: "status.ingress.solo.io/emitter/resources_in", - Measure: mStatusResourcesIn, - Description: "The number of resource lists received on open watch channels", - Aggregation: view.Count(), - TagKeys: []tag.Key{ - skstats.NamespaceKey, - skstats.ResourceKey, - }, - } - statussnapshotOutView = &view.View{ - Name: "status.ingress.solo.io/emitter/snap_out", - Measure: mStatusSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - statussnapshotMissedView = &view.View{ - Name: "status.ingress.solo.io/emitter/snap_missed", - Measure: mStatusSnapshotMissed, - Description: "The number of snapshots updates going missed. this can happen in heavy load. 
missed snapshot will be re-tried after a second.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register( - statussnapshotInView, - statussnapshotOutView, - statussnapshotMissedView, - statusResourcesInView, - ) -} - -type StatusSnapshotEmitter interface { - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *StatusSnapshot, <-chan error, error) -} - -type StatusEmitter interface { - StatusSnapshotEmitter - Register() error - KubeService() KubeServiceClient - Ingress() IngressClient -} - -func NewStatusEmitter(kubeServiceClient KubeServiceClient, ingressClient IngressClient) StatusEmitter { - return NewStatusEmitterWithEmit(kubeServiceClient, ingressClient, make(chan struct{})) -} - -func NewStatusEmitterWithEmit(kubeServiceClient KubeServiceClient, ingressClient IngressClient, emit <-chan struct{}) StatusEmitter { - return &statusEmitter{ - kubeService: kubeServiceClient, - ingress: ingressClient, - forceEmit: emit, - } -} - -type statusEmitter struct { - forceEmit <-chan struct{} - kubeService KubeServiceClient - ingress IngressClient -} - -func (c *statusEmitter) Register() error { - if err := c.kubeService.Register(); err != nil { - return err - } - if err := c.ingress.Register(); err != nil { - return err - } - return nil -} - -func (c *statusEmitter) KubeService() KubeServiceClient { - return c.kubeService -} - -func (c *statusEmitter) Ingress() IngressClient { - return c.ingress -} - -func (c *statusEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *StatusSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for KubeService */ - type kubeServiceListWithNamespace struct { - list KubeServiceList - namespace string - } - kubeServiceChan := make(chan kubeServiceListWithNamespace) - - var initialKubeServiceList KubeServiceList - /* Create channel for Ingress */ - type ingressListWithNamespace struct { - list IngressList - namespace string - } - ingressChan := make(chan ingressListWithNamespace) - - var initialIngressList IngressList - - currentSnapshot := StatusSnapshot{} - servicesByNamespace := make(map[string]KubeServiceList) - ingressesByNamespace := make(map[string]IngressList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for KubeService */ - { - services, err := c.kubeService.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial KubeService list") - } - initialKubeServiceList = append(initialKubeServiceList, services...) 
- servicesByNamespace[namespace] = services - } - kubeServiceNamespacesChan, kubeServiceErrs, err := c.kubeService.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting KubeService watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, kubeServiceErrs, namespace+"-services") - }(namespace) - /* Setup namespaced watch for Ingress */ - { - ingresses, err := c.ingress.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial Ingress list") - } - initialIngressList = append(initialIngressList, ingresses...) - ingressesByNamespace[namespace] = ingresses - } - ingressNamespacesChan, ingressErrs, err := c.ingress.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting Ingress watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, ingressErrs, namespace+"-ingresses") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case kubeServiceList, ok := <-kubeServiceNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case kubeServiceChan <- kubeServiceListWithNamespace{list: kubeServiceList, namespace: namespace}: - } - case ingressList, ok := <-ingressNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case ingressChan <- ingressListWithNamespace{list: ingressList, namespace: namespace}: - } - } - } - }(namespace) - } - /* Initialize snapshot for Services */ - currentSnapshot.Services = initialKubeServiceList.Sort() - /* Initialize snapshot for Ingresses */ - currentSnapshot.Ingresses = initialIngressList.Sort() - - snapshots := make(chan *StatusSnapshot) - go func() { - // sent initial snapshot to kick off the watch - initialSnapshot := currentSnapshot.Clone() - snapshots <- &initialSnapshot - - timer := time.NewTicker(time.Second * 1) - previousHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - // this should never happen, so panic if it does - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - sentSnapshot := currentSnapshot.Clone() - select { - case snapshots <- &sentSnapshot: - stats.Record(ctx, mStatusSnapshotOut.M(1)) - previousHash = currentHash - default: - stats.Record(ctx, mStatusSnapshotMissed.M(1)) - } - } - - defer func() { - close(snapshots) - // we must wait for done before closing the error chan, - // to avoid sending on close channel. 
- done.Wait() - close(errs) - }() - for { - record := func() { stats.Record(ctx, mStatusSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case kubeServiceNamespacedList, ok := <-kubeServiceChan: - if !ok { - return - } - record() - - namespace := kubeServiceNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "kube_service", - mStatusResourcesIn, - ) - - // merge lists by namespace - servicesByNamespace[namespace] = kubeServiceNamespacedList.list - var kubeServiceList KubeServiceList - for _, services := range servicesByNamespace { - kubeServiceList = append(kubeServiceList, services...) - } - currentSnapshot.Services = kubeServiceList.Sort() - case ingressNamespacedList, ok := <-ingressChan: - if !ok { - return - } - record() - - namespace := ingressNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "ingress", - mStatusResourcesIn, - ) - - // merge lists by namespace - ingressesByNamespace[namespace] = ingressNamespacedList.list - var ingressList IngressList - for _, ingresses := range ingressesByNamespace { - ingressList = append(ingressList, ingresses...) - } - currentSnapshot.Ingresses = ingressList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/ingress/pkg/api/v1/status_snapshot_simple_emitter.sk.go b/projects/ingress/pkg/api/v1/status_snapshot_simple_emitter.sk.go deleted file mode 100644 index 3784fffca28..00000000000 --- a/projects/ingress/pkg/api/v1/status_snapshot_simple_emitter.sk.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - "go.uber.org/zap" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -type StatusSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *StatusSnapshot, <-chan error, error) -} - -func NewStatusSimpleEmitter(aggregatedWatch clients.ResourceWatch) StatusSimpleEmitter { - return NewStatusSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) -} - -func NewStatusSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) StatusSimpleEmitter { - return &statusSimpleEmitter{ - aggregatedWatch: aggregatedWatch, - forceEmit: emit, - } -} - -type statusSimpleEmitter struct { - forceEmit <-chan struct{} - aggregatedWatch clients.ResourceWatch -} - -func (c *statusSimpleEmitter) Snapshots(ctx context.Context) (<-chan *StatusSnapshot, <-chan error, error) { - snapshots := make(chan *StatusSnapshot) - errs := make(chan error) - - untyped, watchErrs, err := c.aggregatedWatch(ctx) - if err != nil { - return nil, nil, err - } - - go errutils.AggregateErrs(ctx, errs, watchErrs, "status-emitter") - - go func() { - currentSnapshot := StatusSnapshot{} - timer := time.NewTicker(time.Second * 1) - var previousHash uint64 - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - previousHash = currentHash - - stats.Record(ctx, mStatusSnapshotOut.M(1)) - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - - defer func() { - close(snapshots) - close(errs) - }() - - for { - record := func() { 
stats.Record(ctx, mStatusSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case untypedList := <-untyped: - record() - - currentSnapshot = StatusSnapshot{} - for _, res := range untypedList { - switch typed := res.(type) { - case *KubeService: - currentSnapshot.Services = append(currentSnapshot.Services, typed) - case *Ingress: - currentSnapshot.Ingresses = append(currentSnapshot.Ingresses, typed) - default: - select { - case errs <- fmt.Errorf("StatusSnapshotEmitter "+ - "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): - case <-ctx.Done(): - return - } - } - } - - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/ingress/pkg/api/v1/translator_event_loop.sk.go b/projects/ingress/pkg/api/v1/translator_event_loop.sk.go deleted file mode 100644 index b307dfc5bbd..00000000000 --- a/projects/ingress/pkg/api/v1/translator_event_loop.sk.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -var ( - mTranslatorSnapshotTimeSec = stats.Float64("translator.ingress.solo.io/sync/time_sec", "The time taken for a given sync", "1") - mTranslatorSnapshotTimeSecView = &view.View{ - Name: "translator.ingress.solo.io/sync/time_sec", - Description: "The time taken for a given sync", - TagKeys: []tag.Key{tag.MustNewKey("syncer_name")}, - Measure: mTranslatorSnapshotTimeSec, - Aggregation: view.Distribution(0.01, 0.05, 0.1, 0.25, 0.5, 1, 5, 10, 60), - } -) - -func init() { - view.Register( - mTranslatorSnapshotTimeSecView, - ) -} - -type TranslatorSyncer interface { - Sync(context.Context, *TranslatorSnapshot) error -} - -type TranslatorSyncers []TranslatorSyncer - -func (s TranslatorSyncers) Sync(ctx context.Context, snapshot *TranslatorSnapshot) error { - var multiErr *multierror.Error - for _, syncer := range s { - if err := syncer.Sync(ctx, snapshot); err != nil { - multiErr = multierror.Append(multiErr, err) - } - } - return multiErr.ErrorOrNil() -} - -type translatorEventLoop struct { - emitter TranslatorSnapshotEmitter - syncer TranslatorSyncer - ready chan struct{} -} - -func NewTranslatorEventLoop(emitter TranslatorSnapshotEmitter, syncer TranslatorSyncer) eventloop.EventLoop { - return &translatorEventLoop{ - emitter: emitter, - syncer: syncer, - ready: make(chan struct{}), - } -} - -func (el *translatorEventLoop) Ready() <-chan struct{} { - return el.ready -} - -func (el *translatorEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(opts.Ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - var 
channelClosed bool - - // create a new context for each loop, cancel it before each loop - var cancel context.CancelFunc = func() {} - - // use closure to allow cancel function to be updated as context changes - defer func() { cancel() }() - - // cache the previous snapshot for comparison - var previousSnapshot *TranslatorSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - if syncDecider, isDecider := el.syncer.(TranslatorSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := el.syncer.(TranslatorSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(opts.Ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // cancel any open watches from previous loop - cancel() - - startTime := time.Now() - ctx, span := trace.StartSpan(opts.Ctx, "translator.ingress.solo.io.EventLoopSync") - ctx, canc := context.WithCancel(ctx) - cancel = canc - err := el.syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", el.syncer)), - }, - mTranslatorSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } else if !channelClosed { - channelClosed = true - close(el.ready) - } - - previousSnapshot = snapshot - - case <-opts.Ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/ingress/pkg/api/v1/translator_simple_event_loop.sk.go b/projects/ingress/pkg/api/v1/translator_simple_event_loop.sk.go deleted file mode 100644 index a8cc0c62d7e..00000000000 --- a/projects/ingress/pkg/api/v1/translator_simple_event_loop.sk.go +++ /dev/null @@ -1,134 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
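For orientation: the event loop deleted just above drives any TranslatorSyncer, re-running Sync with a fresh context whenever the emitter reports a changed TranslatorSnapshot and cancelling the previous run first. A minimal sketch of a syncer wired into that loop is shown below; the package name, the logging-only syncer, and the runExample wrapper are illustrative assumptions, not code that existed in this repository.

package example

import (
	"context"

	"github.com/solo-io/go-utils/contextutils"
	v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1"
	"github.com/solo-io/solo-kit/pkg/api/v1/clients"
)

// loggingSyncer is a hypothetical TranslatorSyncer used only to illustrate
// how the generated event loop above was consumed.
type loggingSyncer struct{}

// Sync is called by the event loop for every changed snapshot; here it only
// logs the snapshot contents.
func (s *loggingSyncer) Sync(ctx context.Context, snap *v1.TranslatorSnapshot) error {
	contextutils.LoggerFrom(ctx).Infof(
		"snapshot: %v upstreams, %v services, %v ingresses",
		len(snap.Upstreams), len(snap.Services), len(snap.Ingresses))
	return nil
}

// runExample sketches the wiring: emitter + syncer -> event loop.
func runExample(emitter v1.TranslatorEmitter, namespaces []string, opts clients.WatchOpts) (<-chan error, error) {
	eventLoop := v1.NewTranslatorEventLoop(emitter, &loggingSyncer{})
	return eventLoop.Run(namespaces, opts)
}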
- -package v1 - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -// SyncDeciders Syncer which implements this interface -// can make smarter decisions over whether -// it should be restarted (including having its context cancelled) -// based on a diff of the previous and current snapshot - -// Deprecated: use TranslatorSyncDeciderWithContext -type TranslatorSyncDecider interface { - TranslatorSyncer - ShouldSync(old, new *TranslatorSnapshot) bool -} - -type TranslatorSyncDeciderWithContext interface { - TranslatorSyncer - ShouldSync(ctx context.Context, old, new *TranslatorSnapshot) bool -} - -type translatorSimpleEventLoop struct { - emitter TranslatorSimpleEmitter - syncers []TranslatorSyncer -} - -func NewTranslatorSimpleEventLoop(emitter TranslatorSimpleEmitter, syncers ...TranslatorSyncer) eventloop.SimpleEventLoop { - return &translatorSimpleEventLoop{ - emitter: emitter, - syncers: syncers, - } -} - -func (el *translatorSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(ctx) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[TranslatorSyncer]context.CancelFunc) - - // use closure to allow cancel function to be updated as context changes - defer func() { - for _, cancel := range syncerCancels { - cancel() - } - }() - - // cache the previous snapshot for comparison - var previousSnapshot *TranslatorSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - // cancel any open watches from previous loop - for _, syncer := range el.syncers { - // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(TranslatorSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := syncer.(TranslatorSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // if this syncer had a previous context, cancel it - cancel, ok := syncerCancels[syncer] - if ok { - cancel() - } - - startTime := time.Now() - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("translator.ingress.solo.io.SimpleEventLoopSync-%T", syncer)) - ctx, canc := context.WithCancel(ctx) - err := syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", syncer)), - }, - mTranslatorSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! 
could not propagate err: %v", err) - } - } - - syncerCancels[syncer] = canc - } - - previousSnapshot = snapshot - - case <-ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/ingress/pkg/api/v1/translator_snapshot.sk.go b/projects/ingress/pkg/api/v1/translator_snapshot.sk.go deleted file mode 100644 index 1685a5a4c3f..00000000000 --- a/projects/ingress/pkg/api/v1/translator_snapshot.sk.go +++ /dev/null @@ -1,252 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "fmt" - "hash" - "hash/fnv" - "log" - - gloo_solo_io "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - - "github.com/rotisserie/eris" - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "go.uber.org/zap" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type TranslatorSnapshot struct { - Upstreams gloo_solo_io.UpstreamList - Services KubeServiceList - Ingresses IngressList -} - -func (s TranslatorSnapshot) Clone() TranslatorSnapshot { - return TranslatorSnapshot{ - Upstreams: s.Upstreams.Clone(), - Services: s.Services.Clone(), - Ingresses: s.Ingresses.Clone(), - } -} - -func (s TranslatorSnapshot) Hash(hasher hash.Hash64) (uint64, error) { - if hasher == nil { - hasher = fnv.New64() - } - if _, err := s.hashUpstreams(hasher); err != nil { - return 0, err - } - if _, err := s.hashServices(hasher); err != nil { - return 0, err - } - if _, err := s.hashIngresses(hasher); err != nil { - return 0, err - } - return hasher.Sum64(), nil -} - -func (s TranslatorSnapshot) hashUpstreams(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Upstreams.AsInterfaces()...) -} - -func (s TranslatorSnapshot) hashServices(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Services.AsInterfaces()...) -} - -func (s TranslatorSnapshot) hashIngresses(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Ingresses.AsInterfaces()...) 
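// NOTE (added annotation, not part of the deleted file): the snapshot emitters
// removed in this patch call Hash(nil) on a one-second ticker and compare the
// result with the previously sent value; a snapshot is only pushed onto the
// output channel when this FNV-64 hash changes, which is how redundant
// snapshots are suppressed.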
-} - -func (s TranslatorSnapshot) HashFields() []zap.Field { - var fields []zap.Field - hasher := fnv.New64() - UpstreamsHash, err := s.hashUpstreams(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("upstreams", UpstreamsHash)) - ServicesHash, err := s.hashServices(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("services", ServicesHash)) - IngressesHash, err := s.hashIngresses(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("ingresses", IngressesHash)) - snapshotHash, err := s.Hash(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return append(fields, zap.Uint64("snapshotHash", snapshotHash)) -} - -func (s *TranslatorSnapshot) GetResourcesList(resource resources.Resource) (resources.ResourceList, error) { - switch resource.(type) { - case *gloo_solo_io.Upstream: - return s.Upstreams.AsResources(), nil - case *KubeService: - return s.Services.AsResources(), nil - case *Ingress: - return s.Ingresses.AsResources(), nil - default: - return resources.ResourceList{}, eris.New("did not contain the input resource type returning empty list") - } -} - -func (s *TranslatorSnapshot) RemoveFromResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch resource.(type) { - case *gloo_solo_io.Upstream: - - for i, res := range s.Upstreams { - if refKey == res.GetMetadata().Ref().Key() { - s.Upstreams = append(s.Upstreams[:i], s.Upstreams[i+1:]...) - break - } - } - return nil - case *KubeService: - - for i, res := range s.Services { - if refKey == res.GetMetadata().Ref().Key() { - s.Services = append(s.Services[:i], s.Services[i+1:]...) - break - } - } - return nil - case *Ingress: - - for i, res := range s.Ingresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Ingresses = append(s.Ingresses[:i], s.Ingresses[i+1:]...) 
- break - } - } - return nil - default: - return eris.Errorf("did not remove the resource because its type does not exist [%T]", resource) - } -} - -func (s *TranslatorSnapshot) RemoveMatches(predicate core.Predicate) { - var Upstreams gloo_solo_io.UpstreamList - for _, res := range s.Upstreams { - if matches := predicate(res.GetMetadata()); !matches { - Upstreams = append(Upstreams, res) - } - } - s.Upstreams = Upstreams - var Services KubeServiceList - for _, res := range s.Services { - if matches := predicate(res.GetMetadata()); !matches { - Services = append(Services, res) - } - } - s.Services = Services - var Ingresses IngressList - for _, res := range s.Ingresses { - if matches := predicate(res.GetMetadata()); !matches { - Ingresses = append(Ingresses, res) - } - } - s.Ingresses = Ingresses -} - -func (s *TranslatorSnapshot) UpsertToResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch typed := resource.(type) { - case *gloo_solo_io.Upstream: - updated := false - for i, res := range s.Upstreams { - if refKey == res.GetMetadata().Ref().Key() { - s.Upstreams[i] = typed - updated = true - } - } - if !updated { - s.Upstreams = append(s.Upstreams, typed) - } - s.Upstreams.Sort() - return nil - case *KubeService: - updated := false - for i, res := range s.Services { - if refKey == res.GetMetadata().Ref().Key() { - s.Services[i] = typed - updated = true - } - } - if !updated { - s.Services = append(s.Services, typed) - } - s.Services.Sort() - return nil - case *Ingress: - updated := false - for i, res := range s.Ingresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Ingresses[i] = typed - updated = true - } - } - if !updated { - s.Ingresses = append(s.Ingresses, typed) - } - s.Ingresses.Sort() - return nil - default: - return eris.Errorf("did not add/replace the resource type because it does not exist %T", resource) - } -} - -type TranslatorSnapshotStringer struct { - Version uint64 - Upstreams []string - Services []string - Ingresses []string -} - -func (ss TranslatorSnapshotStringer) String() string { - s := fmt.Sprintf("TranslatorSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Upstreams %v\n", len(ss.Upstreams)) - for _, name := range ss.Upstreams { - s += fmt.Sprintf(" %v\n", name) - } - - s += fmt.Sprintf(" Services %v\n", len(ss.Services)) - for _, name := range ss.Services { - s += fmt.Sprintf(" %v\n", name) - } - - s += fmt.Sprintf(" Ingresses %v\n", len(ss.Ingresses)) - for _, name := range ss.Ingresses { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s TranslatorSnapshot) Stringer() TranslatorSnapshotStringer { - snapshotHash, err := s.Hash(nil) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return TranslatorSnapshotStringer{ - Version: snapshotHash, - Upstreams: s.Upstreams.NamespacesDotNames(), - Services: s.Services.NamespacesDotNames(), - Ingresses: s.Ingresses.NamespacesDotNames(), - } -} - -var TranslatorGvkToHashableResource = map[schema.GroupVersionKind]func() resources.HashableResource{ - gloo_solo_io.UpstreamGVK: gloo_solo_io.NewUpstreamHashableResource, - KubeServiceGVK: NewKubeServiceHashableResource, - IngressGVK: NewIngressHashableResource, -} diff --git a/projects/ingress/pkg/api/v1/translator_snapshot_emitter.sk.go b/projects/ingress/pkg/api/v1/translator_snapshot_emitter.sk.go deleted file mode 100644 index fa0891d8182..00000000000 --- a/projects/ingress/pkg/api/v1/translator_snapshot_emitter.sk.go +++ /dev/null @@ -1,403 +0,0 @@ -// 
Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "sync" - "time" - - gloo_solo_io "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" -) - -var ( - // Deprecated. See mTranslatorResourcesIn - mTranslatorSnapshotIn = stats.Int64("translator.ingress.solo.io/emitter/snap_in", "Deprecated. Use translator.ingress.solo.io/emitter/resources_in. The number of snapshots in", "1") - - // metrics for emitter - mTranslatorResourcesIn = stats.Int64("translator.ingress.solo.io/emitter/resources_in", "The number of resource lists received on open watch channels", "1") - mTranslatorSnapshotOut = stats.Int64("translator.ingress.solo.io/emitter/snap_out", "The number of snapshots out", "1") - mTranslatorSnapshotMissed = stats.Int64("translator.ingress.solo.io/emitter/snap_missed", "The number of snapshots missed", "1") - - // views for emitter - // deprecated: see translatorResourcesInView - translatorsnapshotInView = &view.View{ - Name: "translator.ingress.solo.io/emitter/snap_in", - Measure: mTranslatorSnapshotIn, - Description: "Deprecated. Use translator.ingress.solo.io/emitter/resources_in. The number of snapshots updates coming in.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - - translatorResourcesInView = &view.View{ - Name: "translator.ingress.solo.io/emitter/resources_in", - Measure: mTranslatorResourcesIn, - Description: "The number of resource lists received on open watch channels", - Aggregation: view.Count(), - TagKeys: []tag.Key{ - skstats.NamespaceKey, - skstats.ResourceKey, - }, - } - translatorsnapshotOutView = &view.View{ - Name: "translator.ingress.solo.io/emitter/snap_out", - Measure: mTranslatorSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - translatorsnapshotMissedView = &view.View{ - Name: "translator.ingress.solo.io/emitter/snap_missed", - Measure: mTranslatorSnapshotMissed, - Description: "The number of snapshots updates going missed. this can happen in heavy load. 
missed snapshot will be re-tried after a second.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register( - translatorsnapshotInView, - translatorsnapshotOutView, - translatorsnapshotMissedView, - translatorResourcesInView, - ) -} - -type TranslatorSnapshotEmitter interface { - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TranslatorSnapshot, <-chan error, error) -} - -type TranslatorEmitter interface { - TranslatorSnapshotEmitter - Register() error - Upstream() gloo_solo_io.UpstreamClient - KubeService() KubeServiceClient - Ingress() IngressClient -} - -func NewTranslatorEmitter(upstreamClient gloo_solo_io.UpstreamClient, kubeServiceClient KubeServiceClient, ingressClient IngressClient) TranslatorEmitter { - return NewTranslatorEmitterWithEmit(upstreamClient, kubeServiceClient, ingressClient, make(chan struct{})) -} - -func NewTranslatorEmitterWithEmit(upstreamClient gloo_solo_io.UpstreamClient, kubeServiceClient KubeServiceClient, ingressClient IngressClient, emit <-chan struct{}) TranslatorEmitter { - return &translatorEmitter{ - upstream: upstreamClient, - kubeService: kubeServiceClient, - ingress: ingressClient, - forceEmit: emit, - } -} - -type translatorEmitter struct { - forceEmit <-chan struct{} - upstream gloo_solo_io.UpstreamClient - kubeService KubeServiceClient - ingress IngressClient -} - -func (c *translatorEmitter) Register() error { - if err := c.upstream.Register(); err != nil { - return err - } - if err := c.kubeService.Register(); err != nil { - return err - } - if err := c.ingress.Register(); err != nil { - return err - } - return nil -} - -func (c *translatorEmitter) Upstream() gloo_solo_io.UpstreamClient { - return c.upstream -} - -func (c *translatorEmitter) KubeService() KubeServiceClient { - return c.kubeService -} - -func (c *translatorEmitter) Ingress() IngressClient { - return c.ingress -} - -func (c *translatorEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TranslatorSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. 
Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for Upstream */ - type upstreamListWithNamespace struct { - list gloo_solo_io.UpstreamList - namespace string - } - upstreamChan := make(chan upstreamListWithNamespace) - - var initialUpstreamList gloo_solo_io.UpstreamList - /* Create channel for KubeService */ - type kubeServiceListWithNamespace struct { - list KubeServiceList - namespace string - } - kubeServiceChan := make(chan kubeServiceListWithNamespace) - - var initialKubeServiceList KubeServiceList - /* Create channel for Ingress */ - type ingressListWithNamespace struct { - list IngressList - namespace string - } - ingressChan := make(chan ingressListWithNamespace) - - var initialIngressList IngressList - - currentSnapshot := TranslatorSnapshot{} - upstreamsByNamespace := make(map[string]gloo_solo_io.UpstreamList) - servicesByNamespace := make(map[string]KubeServiceList) - ingressesByNamespace := make(map[string]IngressList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for Upstream */ - { - upstreams, err := c.upstream.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial Upstream list") - } - initialUpstreamList = append(initialUpstreamList, upstreams...) - upstreamsByNamespace[namespace] = upstreams - } - upstreamNamespacesChan, upstreamErrs, err := c.upstream.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting Upstream watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, upstreamErrs, namespace+"-upstreams") - }(namespace) - /* Setup namespaced watch for KubeService */ - { - services, err := c.kubeService.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial KubeService list") - } - initialKubeServiceList = append(initialKubeServiceList, services...) - servicesByNamespace[namespace] = services - } - kubeServiceNamespacesChan, kubeServiceErrs, err := c.kubeService.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting KubeService watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, kubeServiceErrs, namespace+"-services") - }(namespace) - /* Setup namespaced watch for Ingress */ - { - ingresses, err := c.ingress.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial Ingress list") - } - initialIngressList = append(initialIngressList, ingresses...) 
- ingressesByNamespace[namespace] = ingresses - } - ingressNamespacesChan, ingressErrs, err := c.ingress.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting Ingress watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, ingressErrs, namespace+"-ingresses") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case upstreamList, ok := <-upstreamNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case upstreamChan <- upstreamListWithNamespace{list: upstreamList, namespace: namespace}: - } - case kubeServiceList, ok := <-kubeServiceNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case kubeServiceChan <- kubeServiceListWithNamespace{list: kubeServiceList, namespace: namespace}: - } - case ingressList, ok := <-ingressNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case ingressChan <- ingressListWithNamespace{list: ingressList, namespace: namespace}: - } - } - } - }(namespace) - } - /* Initialize snapshot for Upstreams */ - currentSnapshot.Upstreams = initialUpstreamList.Sort() - /* Initialize snapshot for Services */ - currentSnapshot.Services = initialKubeServiceList.Sort() - /* Initialize snapshot for Ingresses */ - currentSnapshot.Ingresses = initialIngressList.Sort() - - snapshots := make(chan *TranslatorSnapshot) - go func() { - // sent initial snapshot to kick off the watch - initialSnapshot := currentSnapshot.Clone() - snapshots <- &initialSnapshot - - timer := time.NewTicker(time.Second * 1) - previousHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - // this should never happen, so panic if it does - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - sentSnapshot := currentSnapshot.Clone() - select { - case snapshots <- &sentSnapshot: - stats.Record(ctx, mTranslatorSnapshotOut.M(1)) - previousHash = currentHash - default: - stats.Record(ctx, mTranslatorSnapshotMissed.M(1)) - } - } - - defer func() { - close(snapshots) - // we must wait for done before closing the error chan, - // to avoid sending on close channel. - done.Wait() - close(errs) - }() - for { - record := func() { stats.Record(ctx, mTranslatorSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case upstreamNamespacedList, ok := <-upstreamChan: - if !ok { - return - } - record() - - namespace := upstreamNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "upstream", - mTranslatorResourcesIn, - ) - - // merge lists by namespace - upstreamsByNamespace[namespace] = upstreamNamespacedList.list - var upstreamList gloo_solo_io.UpstreamList - for _, upstreams := range upstreamsByNamespace { - upstreamList = append(upstreamList, upstreams...) 
- } - currentSnapshot.Upstreams = upstreamList.Sort() - case kubeServiceNamespacedList, ok := <-kubeServiceChan: - if !ok { - return - } - record() - - namespace := kubeServiceNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "kube_service", - mTranslatorResourcesIn, - ) - - // merge lists by namespace - servicesByNamespace[namespace] = kubeServiceNamespacedList.list - var kubeServiceList KubeServiceList - for _, services := range servicesByNamespace { - kubeServiceList = append(kubeServiceList, services...) - } - currentSnapshot.Services = kubeServiceList.Sort() - case ingressNamespacedList, ok := <-ingressChan: - if !ok { - return - } - record() - - namespace := ingressNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "ingress", - mTranslatorResourcesIn, - ) - - // merge lists by namespace - ingressesByNamespace[namespace] = ingressNamespacedList.list - var ingressList IngressList - for _, ingresses := range ingressesByNamespace { - ingressList = append(ingressList, ingresses...) - } - currentSnapshot.Ingresses = ingressList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/ingress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go b/projects/ingress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go deleted file mode 100644 index 50fd943e3e3..00000000000 --- a/projects/ingress/pkg/api/v1/translator_snapshot_simple_emitter.sk.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - "fmt" - "time" - - gloo_solo_io "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - - "go.opencensus.io/stats" - "go.uber.org/zap" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -type TranslatorSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *TranslatorSnapshot, <-chan error, error) -} - -func NewTranslatorSimpleEmitter(aggregatedWatch clients.ResourceWatch) TranslatorSimpleEmitter { - return NewTranslatorSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) -} - -func NewTranslatorSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) TranslatorSimpleEmitter { - return &translatorSimpleEmitter{ - aggregatedWatch: aggregatedWatch, - forceEmit: emit, - } -} - -type translatorSimpleEmitter struct { - forceEmit <-chan struct{} - aggregatedWatch clients.ResourceWatch -} - -func (c *translatorSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TranslatorSnapshot, <-chan error, error) { - snapshots := make(chan *TranslatorSnapshot) - errs := make(chan error) - - untyped, watchErrs, err := c.aggregatedWatch(ctx) - if err != nil { - return nil, nil, err - } - - go errutils.AggregateErrs(ctx, errs, watchErrs, "translator-emitter") - - go func() { - currentSnapshot := TranslatorSnapshot{} - timer := time.NewTicker(time.Second * 1) - var previousHash uint64 - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - previousHash = currentHash - - stats.Record(ctx, mTranslatorSnapshotOut.M(1)) - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - - defer func() { - close(snapshots) - close(errs) - }() - - for { - record := func() { stats.Record(ctx, mTranslatorSnapshotIn.M(1)) } - - select { - case <-timer.C: - 
sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case untypedList := <-untyped: - record() - - currentSnapshot = TranslatorSnapshot{} - for _, res := range untypedList { - switch typed := res.(type) { - case *gloo_solo_io.Upstream: - currentSnapshot.Upstreams = append(currentSnapshot.Upstreams, typed) - case *KubeService: - currentSnapshot.Services = append(currentSnapshot.Services, typed) - case *Ingress: - currentSnapshot.Ingresses = append(currentSnapshot.Ingresses, typed) - default: - select { - case errs <- fmt.Errorf("TranslatorSnapshotEmitter "+ - "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): - case <-ctx.Done(): - return - } - } - } - - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/ingress/pkg/setup/opts.go b/projects/ingress/pkg/setup/opts.go deleted file mode 100644 index a324b7afcd3..00000000000 --- a/projects/ingress/pkg/setup/opts.go +++ /dev/null @@ -1,25 +0,0 @@ -package setup - -import ( - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" -) - -type Opts struct { - ClusterIngressProxyAddress string - KnativeExternalProxyAddress string - KnativeInternalProxyAddress string - WriteNamespace string - StatusReporterNamespace string - WatchNamespaces []string - Proxies factory.ResourceClientFactory - Upstreams factory.ResourceClientFactory - Secrets factory.ResourceClientFactory - WatchOpts clients.WatchOpts - EnableKnative bool - KnativeVersion string - DisableKubeIngress bool - RequireIngressClass bool - CustomIngressClass string - IngressProxyLabel string -} diff --git a/projects/ingress/pkg/setup/setup.go b/projects/ingress/pkg/setup/setup.go deleted file mode 100644 index a2be1d73ef2..00000000000 --- a/projects/ingress/pkg/setup/setup.go +++ /dev/null @@ -1,19 +0,0 @@ -package setup - -import ( - "context" - - "github.com/solo-io/gloo/pkg/version" - - "github.com/solo-io/gloo/pkg/utils/setuputils" -) - -func Main(customCtx context.Context) error { - return setuputils.Main(setuputils.SetupOpts{ - LoggerName: "ingress", - Version: version.Version, - SetupFunc: Setup, - ExitOnError: true, - CustomCtx: customCtx, - }) -} diff --git a/projects/ingress/pkg/setup/setup_syncer.go b/projects/ingress/pkg/setup/setup_syncer.go deleted file mode 100644 index 394554121e6..00000000000 --- a/projects/ingress/pkg/setup/setup_syncer.go +++ /dev/null @@ -1,338 +0,0 @@ -package setup - -import ( - "context" - "os" - "strconv" - "strings" - - "github.com/solo-io/gloo/pkg/utils/kubeutils" - "github.com/solo-io/gloo/pkg/utils/settingsutil" - - "github.com/solo-io/gloo/pkg/bootstrap/leaderelector" - - "github.com/solo-io/gloo/pkg/utils/statusutils" - - "github.com/golang/protobuf/ptypes" - "github.com/solo-io/gloo/pkg/utils/namespaces" - clusteringressclient "github.com/solo-io/gloo/projects/clusteringress/pkg/api/custom/knative" - clusteringressv1alpha1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/external/knative" - clusteringressv1 "github.com/solo-io/gloo/projects/clusteringress/pkg/api/v1" - clusteringresstranslator "github.com/solo-io/gloo/projects/clusteringress/pkg/translator" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - bootstrap "github.com/solo-io/gloo/projects/gloo/pkg/bootstrap/clients" - gloodefaults "github.com/solo-io/gloo/projects/gloo/pkg/defaults" - "github.com/solo-io/gloo/projects/ingress/pkg/api/ingress" - 
"github.com/solo-io/gloo/projects/ingress/pkg/api/service" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/gloo/projects/ingress/pkg/status" - "github.com/solo-io/gloo/projects/ingress/pkg/translator" - knativeclient "github.com/solo-io/gloo/projects/knative/pkg/api/custom/knative" - knativev1alpha1 "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - knativev1 "github.com/solo-io/gloo/projects/knative/pkg/api/v1" - knativetranslator "github.com/solo-io/gloo/projects/knative/pkg/translator" - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/cache" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" - "github.com/solo-io/solo-kit/pkg/errors" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - knativeclientset "knative.dev/networking/pkg/client/clientset/versioned" - "knative.dev/pkg/network" -) - -var defaultClusterIngressProxyAddress = "clusteringress-proxy." + gloodefaults.GlooSystem + ".svc." + network.GetClusterDomainName() - -var defaultKnativeExternalProxyAddress = "knative-external-proxy." + gloodefaults.GlooSystem + ".svc." + network.GetClusterDomainName() -var defaultKnativeInternalProxyAddress = "knative-internal-proxy." + gloodefaults.GlooSystem + ".svc." + network.GetClusterDomainName() - -func Setup(ctx context.Context, kubeCache kube.SharedCache, inMemoryCache memory.InMemoryResourceCache, settings *gloov1.Settings, _ leaderelector.Identity) error { - var ( - cfg *rest.Config - clientset kubernetes.Interface - kubeCoreCache cache.KubeCoreCache - ) - - params := bootstrap.NewConfigFactoryParams( - settings, - inMemoryCache, - kubeCache, - &cfg, - nil, // no consul client for ingress controller - ) - - proxyFactory, err := bootstrap.ConfigFactoryForSettings(params, gloov1.ProxyCrd) - if err != nil { - return err - } - - upstreamFactory, err := bootstrap.ConfigFactoryForSettings(params, gloov1.UpstreamCrd) - if err != nil { - return err - } - - secretFactory, err := bootstrap.SecretFactoryForSettings( - ctx, - bootstrap.SecretFactoryParams{ - Settings: settings, - SharedCache: inMemoryCache, - Cfg: &cfg, - Clientset: &clientset, - KubeCoreCache: &kubeCoreCache, - VaultClientInitMap: nil, - PluralName: gloov1.SecretCrd.Plural, - }) - if err != nil { - return err - } - - refreshRate, err := ptypes.Duration(settings.GetRefreshRate()) - if err != nil { - return err - } - - writeNamespace := settings.GetDiscoveryNamespace() - if writeNamespace == "" { - writeNamespace = gloodefaults.GlooSystem - } - statusReporterNamespace := statusutils.GetStatusReporterNamespaceOrDefault(writeNamespace) - - watchNamespaces := namespaces.ProcessWatchNamespaces(settingsutil.GetNamespacesToWatch(settings), writeNamespace) - - envTrue := func(name string) bool { - return os.Getenv(name) == "true" || os.Getenv(name) == "1" - } - - disableKubeIngress := envTrue("DISABLE_KUBE_INGRESS") - requireIngressClass := envTrue("REQUIRE_INGRESS_CLASS") - enableKnative := envTrue("ENABLE_KNATIVE_INGRESS") - customIngressClass := os.Getenv("CUSTOM_INGRESS_CLASS") - knativeVersion := os.Getenv("KNATIVE_VERSION") - ingressProxyLabel := os.Getenv("INGRESS_PROXY_LABEL") - - clusterIngressProxyAddress := defaultClusterIngressProxyAddress - if settings.GetKnative() != nil && settings.GetKnative().GetClusterIngressProxyAddress() != "" { - clusterIngressProxyAddress = 
settings.GetKnative().GetClusterIngressProxyAddress() - } - - knativeExternalProxyAddress := defaultKnativeExternalProxyAddress - if settings.GetKnative() != nil && settings.GetKnative().GetKnativeExternalProxyAddress() != "" { - knativeExternalProxyAddress = settings.GetKnative().GetKnativeExternalProxyAddress() - } - - knativeInternalProxyAddress := defaultKnativeInternalProxyAddress - if settings.GetKnative() != nil && settings.GetKnative().GetKnativeInternalProxyAddress() != "" { - knativeInternalProxyAddress = settings.GetKnative().GetKnativeInternalProxyAddress() - } - - if len(ingressProxyLabel) == 0 { - ingressProxyLabel = "ingress-proxy" - } - - opts := Opts{ - ClusterIngressProxyAddress: clusterIngressProxyAddress, - KnativeExternalProxyAddress: knativeExternalProxyAddress, - KnativeInternalProxyAddress: knativeInternalProxyAddress, - WriteNamespace: writeNamespace, - StatusReporterNamespace: statusReporterNamespace, - WatchNamespaces: watchNamespaces, - Proxies: proxyFactory, - Upstreams: upstreamFactory, - Secrets: secretFactory, - WatchOpts: clients.WatchOpts{ - Ctx: ctx, - RefreshRate: refreshRate, - }, - EnableKnative: enableKnative, - KnativeVersion: knativeVersion, - DisableKubeIngress: disableKubeIngress, - RequireIngressClass: requireIngressClass, - CustomIngressClass: customIngressClass, - IngressProxyLabel: ingressProxyLabel, - } - - return RunIngress(opts) -} - -func RunIngress(opts Opts) error { - opts.WatchOpts = opts.WatchOpts.WithDefaults() - opts.WatchOpts.Ctx = contextutils.WithLogger(opts.WatchOpts.Ctx, "ingress") - - if opts.DisableKubeIngress && !opts.EnableKnative { - return errors.Errorf("ingress controller must be enabled for either Knative (clusteringress) or " + - "basic kubernetes ingress. set DISABLE_KUBE_INGRESS=0 or ENABLE_KNATIVE_INGRESS=1") - } - - cfg, err := kubeutils.GetRestConfigWithKubeContext("") - if err != nil { - return errors.Wrapf(err, "getting kube config") - } - - proxyClient, err := gloov1.NewProxyClient(opts.WatchOpts.Ctx, opts.Proxies) - if err != nil { - return err - } - if err := proxyClient.Register(); err != nil { - return err - } - writeErrs := make(chan error) - - if !opts.DisableKubeIngress { - kube, err := kubernetes.NewForConfig(cfg) - if err != nil { - return errors.Wrapf(err, "getting kube client") - } - - upstreamClient, err := gloov1.NewUpstreamClient(opts.WatchOpts.Ctx, opts.Upstreams) - if err != nil { - return err - } - if err := upstreamClient.Register(); err != nil { - return err - } - - baseIngressClient := ingress.NewResourceClient(kube, &v1.Ingress{}) - ingressClient := v1.NewIngressClientWithBase(baseIngressClient) - - baseKubeServiceClient := service.NewResourceClient(kube, &v1.KubeService{}) - kubeServiceClient := v1.NewKubeServiceClientWithBase(baseKubeServiceClient) - - translatorEmitter := v1.NewTranslatorEmitter(upstreamClient, kubeServiceClient, ingressClient) - statusClient := statusutils.GetStatusClientForNamespace(opts.StatusReporterNamespace) - translatorSync := translator.NewSyncer( - opts.WriteNamespace, - proxyClient, - ingressClient, - writeErrs, - opts.RequireIngressClass, - opts.CustomIngressClass, - statusClient) - translatorEventLoop := v1.NewTranslatorEventLoop(translatorEmitter, translatorSync) - translatorEventLoopErrs, err := translatorEventLoop.Run(opts.WatchNamespaces, opts.WatchOpts) - if err != nil { - return err - } - go errutils.AggregateErrs(opts.WatchOpts.Ctx, writeErrs, translatorEventLoopErrs, "ingress_translator_event_loop") - - // note (ilackarms): we must set the selector 
correctly here or the status syncer will not work - // the selector should return exactly 1 service which is our .ingress-proxy service - ingressServiceClient := service.NewClientWithSelector(kubeServiceClient, map[string]string{ - "gloo": opts.IngressProxyLabel, - }) - statusEmitter := v1.NewStatusEmitter(ingressServiceClient, ingressClient) - statusSync := status.NewSyncer(ingressClient) - statusEventLoop := v1.NewStatusEventLoop(statusEmitter, statusSync) - statusEventLoopErrs, err := statusEventLoop.Run(opts.WatchNamespaces, opts.WatchOpts) - if err != nil { - return err - } - go errutils.AggregateErrs(opts.WatchOpts.Ctx, writeErrs, statusEventLoopErrs, "ingress_status_event_loop") - } - - logger := contextutils.LoggerFrom(opts.WatchOpts.Ctx) - - if opts.EnableKnative { - knative, err := knativeclientset.NewForConfig(cfg) - if err != nil { - return errors.Wrapf(err, "creating knative clientset") - } - - // if the version of the target knative is < 0.8.0 (or version not provided), use clusteringress - // else, use the new knative ingress object - if pre080knativeVersion(opts.KnativeVersion) { - logger.Infof("starting Ingress with KNative (ClusterIngress) support enabled") - knativeCache, err := clusteringressclient.NewClusterIngreessCache(opts.WatchOpts.Ctx, knative) - if err != nil { - return errors.Wrapf(err, "creating knative cache") - } - baseClient := clusteringressclient.NewResourceClient(knative, knativeCache) - ingressClient := clusteringressv1alpha1.NewClusterIngressClientWithBase(baseClient) - clusterIngTranslatorEmitter := clusteringressv1.NewTranslatorEmitter(ingressClient) - statusClient := statusutils.GetStatusClientForNamespace(opts.StatusReporterNamespace) - clusterIngTranslatorSync := clusteringresstranslator.NewSyncer( - opts.ClusterIngressProxyAddress, - opts.WriteNamespace, - proxyClient, - knative.NetworkingV1alpha1(), - statusClient, - writeErrs, - ) - clusterIngTranslatorEventLoop := clusteringressv1.NewTranslatorEventLoop(clusterIngTranslatorEmitter, clusterIngTranslatorSync) - clusterIngTranslatorEventLoopErrs, err := clusterIngTranslatorEventLoop.Run(opts.WatchNamespaces, opts.WatchOpts) - if err != nil { - return err - } - go errutils.AggregateErrs(opts.WatchOpts.Ctx, writeErrs, clusterIngTranslatorEventLoopErrs, "cluster_ingress_translator_event_loop") - } else { - logger.Infof("starting Ingress with KNative (Ingress) support enabled") - knativeCache, err := knativeclient.NewIngressCache(opts.WatchOpts.Ctx, knative) - if err != nil { - return errors.Wrapf(err, "creating knative cache") - } - baseClient := knativeclient.NewResourceClient(knative, knativeCache) - ingressClient := knativev1alpha1.NewIngressClientWithBase(baseClient) - knativeTranslatorEmitter := knativev1.NewTranslatorEmitter(ingressClient) - statusClient := statusutils.GetStatusClientForNamespace(opts.StatusReporterNamespace) - knativeTranslatorSync := knativetranslator.NewSyncer( - opts.KnativeExternalProxyAddress, - opts.KnativeInternalProxyAddress, - opts.WriteNamespace, - proxyClient, - knative.NetworkingV1alpha1(), - writeErrs, - opts.RequireIngressClass, - statusClient, - ) - knativeTranslatorEventLoop := knativev1.NewTranslatorEventLoop(knativeTranslatorEmitter, knativeTranslatorSync) - knativeTranslatorEventLoopErrs, err := knativeTranslatorEventLoop.Run(opts.WatchNamespaces, opts.WatchOpts) - if err != nil { - return err - } - go errutils.AggregateErrs(opts.WatchOpts.Ctx, writeErrs, knativeTranslatorEventLoopErrs, "knative_ingress_translator_event_loop") - } - } - - go func() { - for { 
- select { - case err := <-writeErrs: - logger.Errorf("error: %v", err) - case <-opts.WatchOpts.Ctx.Done(): - close(writeErrs) - return - } - } - }() - return nil -} - -// change this to set whether we default to assuming -// knative is pre-0.8.0 in the absence of a valid version parameter -const defaultPre080 = true - -func pre080knativeVersion(version string) bool { - // expected format: 0.8.0 - parts := strings.Split(version, ".") - if len(parts) != 3 { - // default case is true - return defaultPre080 - } - major, err := strconv.Atoi(parts[0]) - if err != nil { - return defaultPre080 - } - if major > 0 { - return false - } - minor, err := strconv.Atoi(parts[1]) - if err != nil { - return defaultPre080 - } - if minor >= 8 { - return false - } - return true -} diff --git a/projects/ingress/pkg/status/status_syncer.go b/projects/ingress/pkg/status/status_syncer.go deleted file mode 100644 index a3a3848b299..00000000000 --- a/projects/ingress/pkg/status/status_syncer.go +++ /dev/null @@ -1,146 +0,0 @@ -package status - -import ( - "context" - "net" - "sort" - - corev1 "k8s.io/api/core/v1" - networkv1 "k8s.io/api/networking/v1" - - "github.com/solo-io/gloo/pkg/utils/syncutil" - "github.com/solo-io/go-utils/hashutils" - "go.uber.org/zap/zapcore" - - "github.com/golang/protobuf/proto" - errors "github.com/rotisserie/eris" - "github.com/solo-io/gloo/projects/ingress/pkg/api/ingress" - "github.com/solo-io/gloo/projects/ingress/pkg/api/service" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" -) - -type statusSyncer struct { - ingressClient v1.IngressClient -} - -func NewSyncer(ingressClient v1.IngressClient) v1.StatusSyncer { - return &statusSyncer{ - ingressClient: ingressClient, - } -} - -// TODO (ilackarms): make sure that sync happens if proxies get updated as well; may need to resync -func (s *statusSyncer) Sync(ctx context.Context, snap *v1.StatusSnapshot) error { - ctx = contextutils.WithLogger(ctx, "statusSyncer") - snapHash := hashutils.MustHash(snap) - logger := contextutils.LoggerFrom(ctx) - logger.Infof("begin sync %v (%v ingresses, %v services)", snapHash, - len(snap.Ingresses), len(snap.Services)) - defer logger.Infof("end sync %v", snapHash) - services := snap.Services - - // stringifying the snapshot may be an expensive operation, so we'd like to avoid building the large - // string if we're not even going to log it anyway - if contextutils.GetLogLevel() == zapcore.DebugLevel { - logger.Debug(syncutil.StringifySnapshot(snap)) - } - - lbStatus, err := getLbStatus(services) - if err != nil { - return err - } - - for _, ing := range snap.Ingresses { - kubeIngress, err := ingress.ToKube(ing) - if err != nil { - return errors.Wrapf(err, "internal error: converting proto ingress to kube ingress") - } - kubeIngress.Status.LoadBalancer.Ingress = lbStatus - - updatedIngress, err := ingress.FromKube(kubeIngress) - if err != nil { - return errors.Wrapf(err, "internal error: converting back to proto ingress from kube ingress") - } - - if proto.Equal(updatedIngress.GetKubeIngressStatus(), ing.GetKubeIngressStatus()) { - continue - } - if _, err := s.ingressClient.Write(updatedIngress, clients.WriteOpts{Ctx: ctx, OverwriteExisting: true}); err != nil { - return errors.Wrapf(err, "writing updated status to kubernetes") - } - logger.Infof("updated ingress %v with status %v", ing.GetMetadata().Ref(), lbStatus) - } - - return nil -} - 
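For orientation: the helpers that follow compute the load-balancer status that Sync (above) writes onto each Ingress. The self-contained sketch below walks the same flow with made-up addresses, using only the Kubernetes API types; it is illustrative and was not part of the deleted files.

package main

import (
	"fmt"
	"net"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	// A hypothetical ingress-proxy Service as the status syncer would see it;
	// the addresses are made up for illustration.
	svc := corev1.Service{
		Spec: corev1.ServiceSpec{ExternalIPs: []string{"198.51.100.7"}},
		Status: corev1.ServiceStatus{
			LoadBalancer: corev1.LoadBalancerStatus{
				Ingress: []corev1.LoadBalancerIngress{
					{IP: "203.0.113.10"},
					{Hostname: "lb.example.com"},
				},
			},
		},
	}

	// Collect addresses the way serviceAddrs (below) does: prefer the IP,
	// fall back to the hostname, then append any ExternalIPs.
	var addrs []string
	for _, ing := range svc.Status.LoadBalancer.Ingress {
		if ing.IP == "" {
			addrs = append(addrs, ing.Hostname)
		} else {
			addrs = append(addrs, ing.IP)
		}
	}
	addrs = append(addrs, svc.Spec.ExternalIPs...)

	// Classify the way ingressStatusFromAddrs (below) does: anything that
	// parses as an IP becomes an IP entry, everything else a hostname entry.
	var lbi []networkingv1.IngressLoadBalancerIngress
	for _, a := range addrs {
		if net.ParseIP(a) == nil {
			lbi = append(lbi, networkingv1.IngressLoadBalancerIngress{Hostname: a})
		} else {
			lbi = append(lbi, networkingv1.IngressLoadBalancerIngress{IP: a})
		}
	}
	fmt.Printf("%+v\n", lbi)
	// Sync (above) assigns a slice like this to
	// kubeIngress.Status.LoadBalancer.Ingress for every tracked Ingress.
}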
-func getLbStatus(services v1.KubeServiceList) ([]networkv1.IngressLoadBalancerIngress, error) { - switch len(services) { - case 0: - return nil, nil - case 1: - kubeSvc, err := service.ToKube(services[0]) - if err != nil { - return nil, errors.Wrapf(err, "internal error: converting proto svc to kube service") - } - - kubeSvcRef := services[0].GetMetadata().Ref() - kubeSvcAddrs, err := serviceAddrs(kubeSvc, kubeSvcRef) - if err != nil { - return nil, errors.Wrapf(err, "internal err: extracting service addrs from kube service") - } - - return ingressStatusFromAddrs(kubeSvcAddrs), nil - } - return nil, errors.Errorf("failed to get lb status: expected 1 ingress service, found %v", func() []*core.ResourceRef { - var refs []*core.ResourceRef - for _, svc := range services { - refs = append(refs, svc.GetMetadata().Ref()) - } - return refs - }()) -} - -func serviceAddrs(svc *corev1.Service, kubeSvcRef *core.ResourceRef) ([]string, error) { - if svc.Spec.Type == corev1.ServiceTypeExternalName { - - // Remove the possibility of using localhost in ExternalNames as endpoints - svcIp := net.ParseIP(svc.Spec.ExternalName) - if svc.Spec.ExternalName == "localhost" || (svcIp != nil && svcIp.IsLoopback()) { - return nil, errors.Errorf("Invalid attempt to use localhost name %s, in %v", svc.Spec.ExternalName, kubeSvcRef) - } - return []string{svc.Spec.ExternalName}, nil - } - var addrs []string - - for _, ip := range svc.Status.LoadBalancer.Ingress { - if ip.IP == "" { - addrs = append(addrs, ip.Hostname) - } else { - addrs = append(addrs, ip.IP) - } - } - addrs = append(addrs, svc.Spec.ExternalIPs...) - - return addrs, nil -} - -func ingressStatusFromAddrs(addrs []string) []networkv1.IngressLoadBalancerIngress { - var lbi []networkv1.IngressLoadBalancerIngress - for _, ep := range addrs { - if net.ParseIP(ep) == nil { - lbi = append(lbi, networkv1.IngressLoadBalancerIngress{Hostname: ep}) - } else { - lbi = append(lbi, networkv1.IngressLoadBalancerIngress{IP: ep}) - } - } - - sort.SliceStable(lbi, func(a, b int) bool { - return lbi[a].IP < lbi[b].IP - }) - - return lbi -} diff --git a/projects/ingress/pkg/translator/translate.go b/projects/ingress/pkg/translator/translate.go deleted file mode 100644 index 319c910de26..00000000000 --- a/projects/ingress/pkg/translator/translate.go +++ /dev/null @@ -1,291 +0,0 @@ -package translator - -import ( - "context" - "sort" - - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/core/matchers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/ssl" - "github.com/solo-io/gloo/projects/ingress/pkg/api/service" - "github.com/solo-io/go-utils/contextutils" - corev1 "k8s.io/api/core/v1" - - errors "github.com/rotisserie/eris" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - glooutils "github.com/solo-io/gloo/projects/gloo/pkg/utils" - "github.com/solo-io/gloo/projects/ingress/pkg/api/ingress" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/go-utils/log" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - networkingv1 "k8s.io/api/networking/v1" -) - -const defaultIngressClass = "gloo" - -const IngressClassKey = "kubernetes.io/ingress.class" - -func translateProxy(ctx context.Context, namespace string, snap *v1.TranslatorSnapshot, requireIngressClass bool, ingressClass string) *gloov1.Proxy { - - if ingressClass == "" { - ingressClass = defaultIngressClass - } - - var ingresses []*networkingv1.Ingress - for _, ig := range snap.Ingresses { - kubeIngress, err := ingress.ToKube(ig) - if err != nil { - 
contextutils.LoggerFrom(ctx).Errorf("internal error: parsing internal ingress representation: %v", err) - continue - } - ingresses = append(ingresses, kubeIngress) - } - - var services []*corev1.Service - for _, svc := range snap.Services { - kubeSvc, err := service.ToKube(svc) - if err != nil { - contextutils.LoggerFrom(ctx).Errorf("internal error: parsing internal service representation: %v", err) - continue - } - services = append(services, kubeSvc) - } - - upstreams := snap.Upstreams - - virtualHostsHttp, secureVirtualHosts := virtualHosts(ctx, ingresses, upstreams, services, requireIngressClass, ingressClass) - - var virtualHostsHttps []*gloov1.VirtualHost - var sslConfigs []*ssl.SslConfig - for _, svh := range secureVirtualHosts { - svh := svh - virtualHostsHttps = append(virtualHostsHttps, svh.vh) - sslConfigs = append(sslConfigs, &ssl.SslConfig{ - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &svh.secret, - }, - SniDomains: svh.vh.GetDomains(), - }) - } - var listeners []*gloov1.Listener - if len(virtualHostsHttp) > 0 { - listeners = append(listeners, &gloov1.Listener{ - Name: "http", - BindAddress: "::", - BindPort: 8080, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: virtualHostsHttp, - }, - }, - }) - } - if len(virtualHostsHttps) > 0 { - listeners = append(listeners, &gloov1.Listener{ - Name: "https", - BindAddress: "::", - BindPort: 8443, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: virtualHostsHttps, - }, - }, - SslConfigurations: sslConfigs, - }) - } - return &gloov1.Proxy{ - Metadata: &core.Metadata{ - Name: "ingress-proxy", // must match envoy role - Namespace: namespace, - }, - Listeners: listeners, - } -} - -func upstreamForBackend(upstreams gloov1.UpstreamList, services []*corev1.Service, ingressNamespace string, backend networkingv1.IngressBackend) (*gloov1.Upstream, error) { - serviceName, servicePort, err := getServiceNameAndPort(services, ingressNamespace, backend.Service) - if err != nil { - return nil, err - } - - // find the upstream with the smallest matching selector - // longer selectors represent subsets of pods for a service - var matchingUpstream *gloov1.Upstream - for _, us := range upstreams { - switch spec := us.GetUpstreamType().(type) { - case *gloov1.Upstream_Kube: - if spec.Kube.GetServiceNamespace() == ingressNamespace && - spec.Kube.GetServiceName() == serviceName && - spec.Kube.GetServicePort() == uint32(servicePort) { - if matchingUpstream != nil { - originalSelectorLength := len(matchingUpstream.GetUpstreamType().(*gloov1.Upstream_Kube).Kube.GetSelector()) - newSelectorLength := len(spec.Kube.GetSelector()) - if newSelectorLength > originalSelectorLength { - continue - } - } - matchingUpstream = us - } - } - } - if matchingUpstream == nil { - return nil, errors.Errorf("discovery failure: upstream not found for kube service %v with port %v", serviceName, servicePort) - } - return matchingUpstream, nil -} - -// getServiceNameAndPort returns the service name and port number for an IngressServiceBackend or an error if the -// defined IngressServiceBackend does not match any available services. 
-// An IngressServiceBackend can have have its port defined either by number or name, so we must handle both cases -func getServiceNameAndPort(services []*corev1.Service, namespace string, ingressService *networkingv1.IngressServiceBackend) (string, int32, error) { - if ingressService == nil { - return "", 0, errors.New("no service specified for ingress backend") - } - serviceName := ingressService.Name - // If the IngressServiceBackend defines a named port, we first find the service by name/namespace - // and then determine the port number which maps to the port name - if ingressService.Port.Name != "" { - portName := ingressService.Port.Name - for _, svc := range services { - if svc.Name == serviceName && svc.Namespace == namespace { - for _, port := range svc.Spec.Ports { - if port.Name == portName { - return serviceName, port.Port, nil - } - } - return "", 0, errors.Errorf("port %v not found for service %v.%v", portName, serviceName, namespace) - } - } - return "", 0, errors.Errorf("service %v.%v not found", serviceName, namespace) - } - - return serviceName, ingressService.Port.Number, nil -} - -type secureVirtualHost struct { - vh *gloov1.VirtualHost - secret core.ResourceRef -} - -func virtualHosts(ctx context.Context, ingresses []*networkingv1.Ingress, upstreams gloov1.UpstreamList, services []*corev1.Service, requireIngressClass bool, ingressClass string) ([]*gloov1.VirtualHost, []secureVirtualHost) { - routesByHostHttp := make(map[string][]*gloov1.Route) - routesByHostHttps := make(map[string][]*gloov1.Route) - secretsByHost := make(map[string]*core.ResourceRef) - var defaultBackend *networkingv1.IngressBackend - for _, ing := range ingresses { - if requireIngressClass && !isOurIngress(ing, ingressClass) { - continue - } - spec := ing.Spec - if spec.DefaultBackend != nil { - if defaultBackend != nil { - contextutils.LoggerFrom(ctx).Warnf("default backend was redeclared in ingress %v, ignoring", ing.Name) - continue - } - defaultBackend = spec.DefaultBackend - } - for _, tls := range spec.TLS { - - ref := core.ResourceRef{ - Name: tls.SecretName, - Namespace: ing.Namespace, - } - for _, host := range tls.Hosts { - if existing, alreadySet := secretsByHost[host]; alreadySet { - if existing.GetName() != ref.GetName() || existing.GetNamespace() != ref.GetNamespace() { - log.Warnf("a TLS secret for host %v was redefined in ingress %v, ignoring", ing.Name) - continue - } - } - secretsByHost[host] = &ref - } - } - - for i, rule := range spec.Rules { - host := rule.Host - if host == "" { - host = "*" - } - // set a "default route" - if rule.HTTP == nil { - log.Warnf("rule %v in ingress %v is missing HTTP field", i, ing.Name) - continue - } - for _, route := range rule.HTTP.Paths { - upstream, err := upstreamForBackend(upstreams, services, ing.Namespace, route.Backend) - if err != nil { - contextutils.LoggerFrom(ctx).Errorf("lookup upstream for ingress %v: %v", ing.Name, err) - continue - } - - pathRegex := route.Path - if pathRegex == "" { - pathRegex = ".*" - } - route := &gloov1.Route{ - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: pathRegex, - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Single{ - Single: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Upstream{ - Upstream: upstream.GetMetadata().Ref(), - }, - }, - }, - }, - }, - } - if _, useTls := secretsByHost[host]; useTls { - routesByHostHttps[host] = append(routesByHostHttps[host], route) - } else { - 
routesByHostHttp[host] = append(routesByHostHttp[host], route) - } - } - } - } - - var virtualHostsHttp []*gloov1.VirtualHost - var virtualHostsHttps []secureVirtualHost - - for host, routes := range routesByHostHttp { - glooutils.SortRoutesByPath(routes) - virtualHostsHttp = append(virtualHostsHttp, &gloov1.VirtualHost{ - Name: host + "-http", - Domains: []string{host, host + ":8080"}, - Routes: routes, - }) - } - - for host, routes := range routesByHostHttps { - glooutils.SortRoutesByPath(routes) - secret, ok := secretsByHost[host] - if !ok { - contextutils.LoggerFrom(ctx).Errorf("internal error: secret not found for host %v after processing ingresses", host) - continue - } - virtualHostsHttps = append(virtualHostsHttps, secureVirtualHost{ - vh: &gloov1.VirtualHost{ - Name: host + "-https", - Domains: []string{host, host + ":8443"}, - Routes: routes, - }, - secret: *secret, - }) - } - - sort.SliceStable(virtualHostsHttp, func(i, j int) bool { - return virtualHostsHttp[i].GetName() < virtualHostsHttp[j].GetName() - }) - sort.SliceStable(virtualHostsHttps, func(i, j int) bool { - return virtualHostsHttps[i].vh.GetName() < virtualHostsHttps[j].vh.GetName() - }) - return virtualHostsHttp, virtualHostsHttps -} - -func isOurIngress(ingress *networkingv1.Ingress, ingressClassToUse string) bool { - return ingress.Annotations[IngressClassKey] == ingressClassToUse -} diff --git a/projects/ingress/pkg/translator/translate_test.go b/projects/ingress/pkg/translator/translate_test.go deleted file mode 100644 index 6329f638235..00000000000 --- a/projects/ingress/pkg/translator/translate_test.go +++ /dev/null @@ -1,631 +0,0 @@ -package translator - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/core/matchers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/kubernetes" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/ssl" - ingresstype "github.com/solo-io/gloo/projects/ingress/pkg/api/ingress" - "github.com/solo-io/gloo/projects/ingress/pkg/api/service" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "sigs.k8s.io/yaml" -) - -var _ = Describe("Translate", func() { - var ( - ctx = context.Background() - ) - - It("creates the appropriate proxy object for the provided ingress objects", func() { - testIngressTranslate := func(requireIngressClass bool) { - - namespace := "example" - serviceName := "wow-service" - servicePort := int32(8080) - secretName := "areallygreatsecret" - pathType := networkingv1.PathTypeImplementationSpecific - ingress := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing", - Namespace: namespace, - Annotations: map[string]string{ - IngressClassKey: "gloo", - }, - }, - Spec: networkingv1.IngressSpec{ - Rules: []networkingv1.IngressRule{ - { - Host: "wow.com", - IngressRuleValue: networkingv1.IngressRuleValue{ - HTTP: &networkingv1.HTTPIngressRuleValue{ - Paths: []networkingv1.HTTPIngressPath{ - { - Path: "/", - PathType: &pathType, - Backend: networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: serviceName, - Port: networkingv1.ServiceBackendPort{ - Number: servicePort, - }, - }, - }, - }, - }, - }, 
- }, - }, - }, - }, - } - ingressTls := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing-tls", - Namespace: namespace, - Annotations: map[string]string{ - IngressClassKey: "gloo", - }, - }, - Spec: networkingv1.IngressSpec{ - TLS: []networkingv1.IngressTLS{ - { - Hosts: []string{"wow.com"}, - SecretName: secretName, - }, - }, - Rules: []networkingv1.IngressRule{ - { - Host: "wow.com", - IngressRuleValue: networkingv1.IngressRuleValue{ - HTTP: &networkingv1.HTTPIngressRuleValue{ - Paths: []networkingv1.HTTPIngressPath{ - { - Path: "/basic", - PathType: &pathType, - Backend: networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: serviceName, - Port: networkingv1.ServiceBackendPort{ - Number: servicePort, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - ingressTls2 := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing-tls-2", - Namespace: namespace, - Annotations: map[string]string{ - IngressClassKey: "gloo", - }, - }, - Spec: networkingv1.IngressSpec{ - TLS: []networkingv1.IngressTLS{ - { - Hosts: []string{"wow.com"}, - SecretName: secretName, - }, - }, - Rules: []networkingv1.IngressRule{ - { - Host: "wow.com", - IngressRuleValue: networkingv1.IngressRuleValue{ - HTTP: &networkingv1.HTTPIngressRuleValue{ - Paths: []networkingv1.HTTPIngressPath{ - { - Path: "/longestpathshouldcomesecond", - PathType: &pathType, - Backend: networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: serviceName, - Port: networkingv1.ServiceBackendPort{ - Number: servicePort, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - if !requireIngressClass { - ingress.Annotations = nil - ingressTls.Annotations = nil - ingressTls2.Annotations = nil - } - ingressRes, err := ingresstype.FromKube(ingress) - Expect(err).NotTo(HaveOccurred()) - ingressResTls, err := ingresstype.FromKube(ingressTls) - Expect(err).NotTo(HaveOccurred()) - ingressResTls2, err := ingresstype.FromKube(ingressTls2) - Expect(err).NotTo(HaveOccurred()) - us := &gloov1.Upstream{ - Metadata: &core.Metadata{ - Namespace: namespace, - Name: "wow-upstream", - }, - UpstreamType: &gloov1.Upstream_Kube{ - Kube: &kubernetes.UpstreamSpec{ - ServiceNamespace: namespace, - ServiceName: serviceName, - ServicePort: uint32(servicePort), - Selector: map[string]string{ - "a": "b", - }, - }, - }, - } - usSubset := &gloov1.Upstream{ - Metadata: &core.Metadata{ - Namespace: namespace, - Name: "wow-upstream-subset", - }, - UpstreamType: &gloov1.Upstream_Kube{ - Kube: &kubernetes.UpstreamSpec{ - ServiceName: serviceName, - ServicePort: uint32(servicePort), - Selector: map[string]string{ - "a": "b", - "c": "d", - }, - }, - }, - } - snap := &v1.TranslatorSnapshot{ - Ingresses: v1.IngressList{ingressRes, ingressResTls, ingressResTls2}, - Upstreams: gloov1.UpstreamList{us, usSubset}, - } - proxy := translateProxy(ctx, namespace, snap, requireIngressClass, "") - - Expect(proxy.String()).To(Equal((&gloov1.Proxy{ - Listeners: []*gloov1.Listener{ - { - Name: "http", - BindAddress: "::", - BindPort: 8080, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: []*gloov1.VirtualHost{ - { - Name: "wow.com-http", - Domains: []string{ - "wow.com", - "wow.com:8080", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Single{ - Single: 
&gloov1.Destination{ - DestinationType: &gloov1.Destination_Upstream{ - Upstream: &core.ResourceRef{ - Name: "wow-upstream", - Namespace: "example", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - Name: "https", - BindAddress: "::", - BindPort: 8443, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: []*gloov1.VirtualHost{ - { - Name: "wow.com-https", - Domains: []string{ - "wow.com", - "wow.com:8443", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/longestpathshouldcomesecond", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Single{ - Single: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Upstream{ - Upstream: &core.ResourceRef{ - Name: "wow-upstream", - Namespace: "example", - }, - }, - }, - }, - }, - }, - }, - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/basic", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Single{ - Single: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Upstream{ - Upstream: &core.ResourceRef{ - Name: "wow-upstream", - Namespace: "example", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - SslConfigurations: []*ssl.SslConfig{ - { - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &core.ResourceRef{ - Name: "areallygreatsecret", - Namespace: "example", - }, - }, - SniDomains: []string{"wow.com", "wow.com:8443"}, - }, - }, - }, - }, - Metadata: &core.Metadata{ - Name: "ingress-proxy", - Namespace: "example", - }, - }).String())) - } - testIngressTranslate(true) - testIngressTranslate(false) - }) - - It("handles multiple secrets correctly", func() { - ingresses := func() v1.IngressList { - var ingressList networkingv1.IngressList - err := yaml.Unmarshal([]byte(ingressExampleYaml), &ingressList) - Expect(err).NotTo(HaveOccurred()) - - var ingresses v1.IngressList - for _, item := range ingressList.Items { - ingress, err := ingresstype.FromKube(&item) - Expect(err).NotTo(HaveOccurred()) - ingresses = append(ingresses, ingress) - } - return ingresses - }() - - us1 := &gloov1.Upstream{ - Metadata: &core.Metadata{Namespace: "gloo-system", Name: "amoeba-dev-api-gateway-amoeba-dev-8080"}, - UpstreamType: &gloov1.Upstream_Kube{ - Kube: &kubernetes.UpstreamSpec{ - ServiceNamespace: "amoeba-dev", - ServiceName: "api-gateway-amoeba-dev", - ServicePort: uint32(8080), - }, - }, - } - - us2 := &gloov1.Upstream{ - Metadata: &core.Metadata{Namespace: "gloo-system", Name: "amoeba-dev-api-gateway-amoeba-dev-8080"}, - UpstreamType: &gloov1.Upstream_Kube{ - Kube: &kubernetes.UpstreamSpec{ - ServiceNamespace: "amoeba-dev", - ServiceName: "amoeba-ui", - ServicePort: uint32(8080), - }, - }, - } - snap := &v1.TranslatorSnapshot{ - Ingresses: ingresses, - Upstreams: gloov1.UpstreamList{us1, us2}, - } - - proxy := translateProxy(ctx, "gloo-system", snap, false, "") - - Expect(proxy.Listeners).To(HaveLen(1)) - Expect(proxy.Listeners[0].SslConfigurations).To(Equal([]*ssl.SslConfig{ - { - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &core.ResourceRef{ - Name: "amoeba-api-ingress-secret", - Namespace: "amoeba-dev", - }, - }, - SniDomains: []string{ - "api-dev.intellishift.com", - "api-dev.intellishift.com:8443", - }, - }, - { - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &core.ResourceRef{ - Name: 
"amoeba-ui-ingress-secret", - Namespace: "amoeba-dev", - }, - }, - SniDomains: []string{ - "ui-dev.intellishift.com", - "ui-dev.intellishift.com:8443", - }, - }, - })) - }) - - It("produces a proxy for valid ingresses and ignores invalid ones", func() { - - namespace := "ns" - - svc := makeService("svc", namespace, "http", 8081) - port := intstr.IntOrString{Type: intstr.Int, IntVal: 8081} - - us := makeUpstream("us", namespace, svc) - - host1 := "host1" - - ing1 := makeIng("ing1", namespace, "", host1, "svc", port) - ing2 := makeIng("invalid-svc", namespace, "", "host2", "svc-that-doesnt-exist", port) - - proxy := translateProxy(ctx, "write-namespace", &v1.TranslatorSnapshot{ - Upstreams: []*gloov1.Upstream{us}, - Services: []*v1.KubeService{svc}, - Ingresses: []*v1.Ingress{ing1, ing2}, - }, false, "") - - Expect(proxy.Listeners).To(HaveLen(1)) - vhosts := proxy.Listeners[0].GetHttpListener().GetVirtualHosts() - Expect(vhosts).To(HaveLen(1)) - // expect only ing1 to have been translated - Expect(vhosts[0].Domains).To(Equal([]string{host1, host1 + ":8080"})) - }) - - It("respects a custom ingress class", func() { - - customClass1 := "fancy" - customClass2 := "pants" - - namespace := "ns" - - svc := makeService("svc", namespace, "http", 8081) - port := intstr.IntOrString{Type: intstr.Int, IntVal: 8081} - - us := makeUpstream("us", namespace, svc) - - host1 := "host1" - - ing1 := makeIng("ing1", namespace, customClass1, host1, "svc", port) - ing2 := makeIng("ing2", namespace, customClass2, "host2", "svc", port) - - proxy := translateProxy(ctx, "write-namespace", &v1.TranslatorSnapshot{ - Upstreams: []*gloov1.Upstream{us}, - Services: []*v1.KubeService{svc}, - Ingresses: []*v1.Ingress{ing1, ing2}, - }, true, customClass1) - - Expect(proxy.Listeners).To(HaveLen(1)) - vhosts := proxy.Listeners[0].GetHttpListener().GetVirtualHosts() - Expect(vhosts).To(HaveLen(1)) - // expect only ing1 to have been translated - Expect(vhosts[0].Domains).To(Equal([]string{host1, host1 + ":8080"})) - }) - - It("supports named ports", func() { - - namespace := "ns" - - svc := makeService("svc", namespace, "http", 8081) - port := intstr.IntOrString{Type: intstr.String, StrVal: "http"} - - us := makeUpstream("us", namespace, svc) - - ing1 := makeIng("ing1", namespace, "", "host", "svc", port) - - proxy := translateProxy(ctx, "write-namespace", &v1.TranslatorSnapshot{ - Upstreams: []*gloov1.Upstream{us}, - Services: []*v1.KubeService{svc}, - Ingresses: []*v1.Ingress{ing1}, - }, false, "") - - Expect(proxy.Listeners).To(HaveLen(1)) - vhosts := proxy.Listeners[0].GetHttpListener().GetVirtualHosts() - // successful translation - Expect(vhosts).To(HaveLen(1)) - }) -}) - -func getFirstPort(svc *corev1.Service) int32 { - return svc.Spec.Ports[0].Port -} - -//nolint:unparam // namespace always receives "ns" -func makeIng(name, namespace, ingressClass, host string, svcName string, servicePort intstr.IntOrString) *v1.Ingress { - backendPort := networkingv1.ServiceBackendPort{} - if servicePort.Type == intstr.Int { - backendPort.Number = servicePort.IntVal - } else { - backendPort.Name = servicePort.StrVal - } - pathType := networkingv1.PathTypeImplementationSpecific - ing := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: map[string]string{ - IngressClassKey: ingressClass, - }, - }, - Spec: networkingv1.IngressSpec{ - Rules: []networkingv1.IngressRule{ - { - Host: host, - IngressRuleValue: networkingv1.IngressRuleValue{ - HTTP: &networkingv1.HTTPIngressRuleValue{ - 
Paths: []networkingv1.HTTPIngressPath{ - { - Path: "/", - PathType: &pathType, - Backend: networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: svcName, - Port: backendPort, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - ingType, _ := ingresstype.FromKube(ing) - return ingType -} - -func makeService(name, namespace, servicePortName string, servicePort int32) *v1.KubeService { - svc, _ := service.FromKube(&corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{{ - Name: servicePortName, - Port: servicePort, - }}, - }, - }) - - return svc -} - -func makeUpstream(name, namespace string, svc *v1.KubeService) *gloov1.Upstream { - kubeSvc, _ := service.ToKube(svc) - return &gloov1.Upstream{ - Metadata: &core.Metadata{ - Namespace: namespace, - Name: name, - }, - UpstreamType: &gloov1.Upstream_Kube{ - Kube: &kubernetes.UpstreamSpec{ - ServiceNamespace: namespace, - ServiceName: kubeSvc.Name, - ServicePort: uint32(getFirstPort(kubeSvc)), - }, - }, - } -} - -const ingressExampleYaml = ` -items: -- apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - annotations: - certmanager.k8s.io/cluster-issuer: letsencrypt-prod - kubernetes.io/ingress.class: gloo - creationTimestamp: "2019-09-09T17:41:10Z" - generation: 1 - name: amoeba-api-ingress - namespace: amoeba-dev - resourceVersion: "26972626" - selfLink: /apis/networking/v1/namespaces/amoeba-dev/ingresses/amoeba-api-ingress - uid: 02c06c8f-d329-11e9-bc54-ce36377988a4 - spec: - rules: - - host: api-dev.intellishift.com - http: - paths: - - backend: - service: - name: api-gateway-amoeba-dev - port: - number: 8080 - path: / - pathType: ImplementationSpecific - tls: - - hosts: - - api-dev.intellishift.com - secretName: amoeba-api-ingress-secret - status: - loadBalancer: {} -- apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - annotations: - certmanager.k8s.io/issuer: amoeba-letsencrypt - kubernetes.io/ingress.class: gloo - creationTimestamp: "2019-09-09T17:41:10Z" - generation: 1 - name: amoeba-ui-ingress - namespace: amoeba-dev - resourceVersion: "26972628" - selfLink: /apis/networking/v1/namespaces/amoeba-dev/ingresses/amoeba-ui-ingress - uid: 02c9b69a-d329-11e9-bc54-ce36377988a4 - spec: - rules: - - host: ui-dev.intellishift.com - http: - paths: - - backend: - service: - name: amoeba-ui - port: - number: 8080 - path: / - pathType: ImplementationSpecific - tls: - - hosts: - - ui-dev.intellishift.com - secretName: amoeba-ui-ingress-secret - status: - loadBalancer: {} -kind: List -metadata: - resourceVersion: "" - selfLink: "" -` diff --git a/projects/ingress/pkg/translator/translator_suite_test.go b/projects/ingress/pkg/translator/translator_suite_test.go deleted file mode 100644 index 677a31d29cf..00000000000 --- a/projects/ingress/pkg/translator/translator_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package translator_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -func TestTranslator(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Translator Suite") -} diff --git a/projects/ingress/pkg/translator/translator_syncer.go b/projects/ingress/pkg/translator/translator_syncer.go deleted file mode 100644 index e46b17221dd..00000000000 --- a/projects/ingress/pkg/translator/translator_syncer.go +++ /dev/null @@ -1,108 +0,0 @@ -package translator - -import ( - "context" - - "github.com/solo-io/gloo/pkg/utils/syncutil" - "github.com/solo-io/go-utils/hashutils" - "go.uber.org/zap/zapcore" - - "github.com/solo-io/gloo/projects/gateway/pkg/utils" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - glooutils "github.com/solo-io/gloo/projects/gloo/pkg/utils" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" -) - -type translatorSyncer struct { - writeNamespace string - writeErrs chan error - proxyClient gloov1.ProxyClient - ingressClient v1.IngressClient - proxyReconciler gloov1.ProxyReconciler - requireIngressClass bool - - // support custom ingress class. - // only relevant when requireIngressClass is true. - // defaults to 'gloo' - customIngressClass string - - statusClient resources.StatusClient -} - -var ( - // labels used to uniquely identify Proxies that are managed by the Gloo controllers - proxyLabelsToWrite = map[string]string{ - glooutils.ProxyTypeKey: glooutils.IngressProxyValue, - } - - // Previously, proxies would be identified with: - // created_by: ingress - // Now, proxies are identified with: - // created_by: gloo-ingress - // - // We need to ensure that users can successfully upgrade from versions - // where the previous labels were used, to versions with the new labels. - // Therefore, we watch Proxies with a superset of the old and new labels, and persist Proxies with new labels. - // - // This is only required for backwards compatibility. - // Once users have upgraded to a version with new labels, we can delete this code and read/write the same labels. 
- // gloo-ingress-translator removed in 1.17 - // ingress removed in 1.12 - proxyLabelSelectorOptions = clients.ListOpts{ - ExpressionSelector: glooutils.GetTranslatorSelectorExpression(glooutils.IngressProxyValue, "gloo-ingress-translator", "ingress"), - } -) - -func NewSyncer(writeNamespace string, proxyClient gloov1.ProxyClient, ingressClient v1.IngressClient, writeErrs chan error, requireIngressClass bool, customIngressClass string, statusClient resources.StatusClient) v1.TranslatorSyncer { - return &translatorSyncer{ - writeNamespace: writeNamespace, - writeErrs: writeErrs, - proxyClient: proxyClient, - ingressClient: ingressClient, - proxyReconciler: gloov1.NewProxyReconciler(proxyClient, statusClient), - requireIngressClass: requireIngressClass, - customIngressClass: customIngressClass, - statusClient: statusClient, - } -} - -// TODO (ilackarms): make sure that sync happens if proxies get updated as well; may need to resync -func (s *translatorSyncer) Sync(ctx context.Context, snap *v1.TranslatorSnapshot) error { - ctx = contextutils.WithLogger(ctx, "translatorSyncer") - - snapHash := hashutils.MustHash(snap) - logger := contextutils.LoggerFrom(ctx) - logger.Infof("begin sync %v (%v ingresses)", snapHash, - len(snap.Ingresses)) - defer logger.Infof("end sync %v", snapHash) - - // stringifying the snapshot may be an expensive operation, so we'd like to avoid building the large - // string if we're not even going to log it anyway - if contextutils.GetLogLevel() == zapcore.DebugLevel { - logger.Debug(syncutil.StringifySnapshot(snap)) - } - - proxy := translateProxy(ctx, s.writeNamespace, snap, s.requireIngressClass, s.customIngressClass) - - var desiredResources gloov1.ProxyList - if proxy != nil { - logger.Infof("creating proxy %v", proxy.GetMetadata().Ref()) - proxy.GetMetadata().Labels = proxyLabelsToWrite - desiredResources = gloov1.ProxyList{proxy} - } - - proxyTransitionFunction := utils.TransitionFunction(s.statusClient) - - if err := s.proxyReconciler.Reconcile(s.writeNamespace, desiredResources, proxyTransitionFunction, clients.ListOpts{ - Ctx: ctx, - Selector: proxyLabelSelectorOptions.Selector, - ExpressionSelector: proxyLabelSelectorOptions.ExpressionSelector, - }); err != nil { - return err - } - - return nil -} diff --git a/projects/knative/README.md b/projects/knative/README.md deleted file mode 100644 index 3e6a2636ebc..00000000000 --- a/projects/knative/README.md +++ /dev/null @@ -1,169 +0,0 @@ -# Knative Networking with Gloo Edge Cluster Ingress - -`Warning: Knative support is deprecated in Gloo Edge 1.10` and [will not be available in Gloo Edge 1.11](https://github.com/solo-io/gloo/issues/5708) - -With Knative support enabled, Gloo Edge will configure Envoy using [Knative's Cluster Ingress Resource](https://github.com/knative/serving/blob/main/pkg/client/informers/externalversions/networking/v1alpha1/ingress.go). - -The installation process detailed in this document provides a way of using Knative-Serving without needing to install Istio. - -### What you'll need - -1. Kubernetes v1.11.3. We recommend using [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) or -[Kubernetes-in-Docker](https://github.com/kubernetes-sigs/kind) to get a local cluster up quickly. -1. [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed on your local machine. - -### Install - -#### 1. Install glooctl - -If this is your first time running Gloo Edge, you’ll need to download the command-line interface (CLI) onto your local machine. 
-You’ll use this CLI to interact with Gloo Edge, including installing it onto your Kubernetes cluster. - -To install the CLI, run: - -##### Linux/MacOS - -`curl -sL https://run.solo.io/gloo/install | sh` - -##### Windows - -`(New-Object System.Net.WebClient).DownloadString("https://run.solo.io/gloo/windows/install") | iex` - -Alternatively, you can download the CLI directly via the github releases page. - -Next, add Gloo Edge to your path with: - -##### Linux/MacOS - -`export PATH=$HOME/.gloo/bin:$PATH` - -##### Windows - -`$env:Path += ";$env:userprofile/.gloo/bin/"` - -Verify the CLI is installed and running correctly with: - -`glooctl version` - -#### 2. Install Knative and Gloo Edge to your Kubernetes Cluster using glooctl - -Once your Kubernetes cluster is up and running, run the following command to deploy Knative-Serving components to the `knative-serving` namespace and Gloo Edge to the `gloo-system` namespace: - -`glooctl install knative` - - -Check that the Gloo Edge and Knative pods and services have been created: - -```bash -kubectl get all -n gloo-system - -NAME READY STATUS RESTARTS AGE -pod/knative-proxy-65485cd8f4-gg9qq 1/1 Running 0 10m -pod/discovery-5cf7c45fb7-ndj29 1/1 Running 0 10m -pod/gateway-7b48fdfbd8-trwvg 1/1 Running 1 10m -pod/gateway-proxy-984bcf497-29jl8 1/1 Running 0 10m -pod/gloo-5fc9f5c558-n6nlr 1/1 Running 1 10m -pod/ingress-6d8d8f595c-smql8 1/1 Running 0 10m -pod/ingress-proxy-5fc45b8f6d-cckw4 1/1 Running 0 10m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/knative-proxy LoadBalancer 10.96.196.217 80:31639/TCP,443:31025/TCP 14m -service/gateway-proxy LoadBalancer 10.109.135.176 8080:32722/TCP 14m -service/gloo ClusterIP 10.103.179.64 9977/TCP 14m -service/ingress-proxy LoadBalancer 10.110.100.99 80:31738/TCP,443:31769/TCP 14m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/knative-proxy 1 1 1 1 14m -deployment.apps/discovery 1 1 1 1 14m -deployment.apps/gateway 1 1 1 1 14m -deployment.apps/gateway-proxy 1 1 1 1 14m -deployment.apps/gloo 1 1 1 1 14m -deployment.apps/ingress 1 1 1 1 14m -deployment.apps/ingress-proxy 1 1 1 1 14m - - -``` - -```bash -kubectl get all -n knative-serving - -NAME READY STATUS RESTARTS AGE -pod/activator-5c4755585c-5wv26 1/1 Running 0 15m -pod/autoscaler-78cd88f869-dvsfr 1/1 Running 0 15m -pod/controller-8d5b85958-tcqn5 1/1 Running 0 15m -pod/webhook-7585d7488c-zk9wz 1/1 Running 0 15m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/activator-service ClusterIP 10.109.189.12 80/TCP,9090/TCP 15m -service/autoscaler ClusterIP 10.98.6.4 8080/TCP,9090/TCP 15m -service/controller ClusterIP 10.108.42.33 9090/TCP 15m -service/webhook ClusterIP 10.99.201.163 443/TCP 15m - -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/activator 1 1 1 1 15m -deployment.apps/autoscaler 1 1 1 1 15m -deployment.apps/controller 1 1 1 1 15m -deployment.apps/webhook 1 1 1 1 15m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/activator-5c4755585c 1 1 1 15m -replicaset.apps/autoscaler-78cd88f869 1 1 1 15m -replicaset.apps/controller-8d5b85958 1 1 1 15m -replicaset.apps/webhook-7585d7488c 1 1 1 15m - -NAME AGE -image.caching.internal.knative.dev/fluentd-sidecar 15m -image.caching.internal.knative.dev/queue-proxy 15m -``` - -#### 3. 
Send Requests to a Knative App - -Create a Knative App: - -```bash -# deploy a basic helloworld-go service -kubectl apply -f https://raw.githubusercontent.com/solo-io/gloo/main/test/kube2e/artifacts/knative-hello-service.yaml -``` - -Get the URL of the Gloo Edge Knative Ingress: - -```bash -export INGRESS=$(glooctl proxy url --name knative-proxy) -echo $INGRESS - -http://172.17.0.2:31345 -``` - -Note: if your cluster is running in minishift, you'll need to run the following command to get an externally accessible -url: - -```bash -export INGRESS=$(glooctl proxy url --name knative-proxy --local-cluster) -echo $INGRESS - -http://192.168.99.163:32220 - -``` - -Send a request to the app using `curl`: - -```bash -curl -H "Host: helloworld-go.default.example.com" $INGRESS - -Hello Go Sample v1! -``` - -Everything should be up and running. If this process does not work, please [open an issue](https://github.com/solo-io/gloo/issues/new). We are happy to answer -questions on our [diligently staffed Slack channel](https://slack.solo.io/). - - -### Uninstall - -To tear down the installation at any point, you can simply run - -```bash - -kubectl delete namespace gloo-system -kubectl delete namespace knative-serving -``` - diff --git a/projects/knative/api/external/knative/ingress.go b/projects/knative/api/external/knative/ingress.go deleted file mode 100644 index f82e34e8a4f..00000000000 --- a/projects/knative/api/external/knative/ingress.go +++ /dev/null @@ -1,45 +0,0 @@ -package knative - -import ( - "reflect" - - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "github.com/solo-io/solo-kit/pkg/utils/kubeutils" - "knative.dev/networking/pkg/apis/networking/v1alpha1" -) - -type Ingress v1alpha1.Ingress - -func (p *Ingress) GetMetadata() *core.Metadata { - return kubeutils.FromKubeMeta(p.ObjectMeta, true) -} - -func (p *Ingress) SetMetadata(meta *core.Metadata) { - p.ObjectMeta = kubeutils.ToKubeMeta(meta) -} - -func (p *Ingress) Equal(that interface{}) bool { - return reflect.DeepEqual(p, that) -} - -func (p *Ingress) Clone() *Ingress { - ing := v1alpha1.Ingress(*p) - ingCopy := ing.DeepCopy() - newIng := Ingress(*ingCopy) - return &newIng -} - -func (p *Ingress) IsPublic() bool { - // by default, ingresses are public if they have no rules saying otherwise - isPublic := true - for _, ingressRule := range p.Spec.Rules { - // if there is _any_ ingress rule, it is not public - isPublic = false - - // ...unless we match a configured IngressVisibilityExternalIP - if ingressRule.Visibility == "" || ingressRule.Visibility == v1alpha1.IngressVisibilityExternalIP { - return true - } - } - return isPublic -} diff --git a/projects/knative/api/external/knative/solo-kit.json b/projects/knative/api/external/knative/solo-kit.json deleted file mode 100644 index 87c15c0ed90..00000000000 --- a/projects/knative/api/external/knative/solo-kit.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "networking.internal.knative.dev", - "version": "v1alpha1", - "custom_resources": [ - { - "package": "github.com/solo-io/gloo/projects/knative/api/external/knative", - "type": "Ingress", - "plural_name": "ingresses", - "short_name": "ci" - } - ], - "go_package": "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" -} diff --git a/projects/knative/api/v1/solo-kit.json b/projects/knative/api/v1/solo-kit.json deleted file mode 100644 index 45ad20328ed..00000000000 --- a/projects/knative/api/v1/solo-kit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "name": "knative.gloo.solo.io", - "version": "v1", - "go_package": 
"github.com/solo-io/gloo/projects/knative/pkg/api/v1", - "imports": [ - "github.com/solo-io/gloo/projects/knative/api/external/knative" - ], - "resource_groups": { - "translator.knative.gloo.solo.io": [ - { - "name": "Ingress", - "package": "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - } - ] - } -} diff --git a/projects/knative/pkg/api/custom/knative/cache.go b/projects/knative/pkg/api/custom/knative/cache.go deleted file mode 100644 index 0129d360698..00000000000 --- a/projects/knative/pkg/api/custom/knative/cache.go +++ /dev/null @@ -1,84 +0,0 @@ -package knative - -import ( - "context" - "sync" - "time" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/controller" - knativeclient "knative.dev/networking/pkg/client/clientset/versioned" - knativeinformers "knative.dev/networking/pkg/client/informers/externalversions" - knativelisters "knative.dev/networking/pkg/client/listers/networking/v1alpha1" -) - -type Cache interface { - IngressLister() knativelisters.IngressLister - Subscribe() <-chan struct{} - Unsubscribe(<-chan struct{}) -} - -type knativeCache struct { - ingress knativelisters.IngressLister - - cacheUpdatedWatchers []chan struct{} - cacheUpdatedWatchersMutex sync.Mutex -} - -// This context should live as long as the cache is desired. i.e. if the cache is shared -// across clients, it should get a context that has a longer lifetime than the clients themselves -func NewIngressCache(ctx context.Context, knativeClient knativeclient.Interface) (*knativeCache, error) { - resyncDuration := 12 * time.Hour - sharedInformerFactory := knativeinformers.NewSharedInformerFactory(knativeClient, resyncDuration) - - ingress := sharedInformerFactory.Networking().V1alpha1().Ingresses() - - k := &knativeCache{ - ingress: ingress.Lister(), - } - - kubeController := controller.NewController("knative-resources-cache", - controller.NewLockingSyncHandler(k.updatedOccurred), - ingress.Informer()) - - stop := ctx.Done() - err := kubeController.Run(2, stop) - if err != nil { - return nil, err - } - - return k, nil -} - -func (k *knativeCache) IngressLister() knativelisters.IngressLister { - return k.ingress -} - -func (k *knativeCache) Subscribe() <-chan struct{} { - k.cacheUpdatedWatchersMutex.Lock() - defer k.cacheUpdatedWatchersMutex.Unlock() - c := make(chan struct{}, 10) - k.cacheUpdatedWatchers = append(k.cacheUpdatedWatchers, c) - return c -} - -func (k *knativeCache) Unsubscribe(c <-chan struct{}) { - k.cacheUpdatedWatchersMutex.Lock() - defer k.cacheUpdatedWatchersMutex.Unlock() - for i, cacheUpdated := range k.cacheUpdatedWatchers { - if cacheUpdated == c { - k.cacheUpdatedWatchers = append(k.cacheUpdatedWatchers[:i], k.cacheUpdatedWatchers[i+1:]...) 
- return - } - } -} - -func (k *knativeCache) updatedOccurred() { - k.cacheUpdatedWatchersMutex.Lock() - defer k.cacheUpdatedWatchersMutex.Unlock() - for _, cacheUpdated := range k.cacheUpdatedWatchers { - select { - case cacheUpdated <- struct{}{}: - default: - } - } -} diff --git a/projects/knative/pkg/api/custom/knative/knative_clusteringress_client.go b/projects/knative/pkg/api/custom/knative/knative_clusteringress_client.go deleted file mode 100644 index bc07063e3c8..00000000000 --- a/projects/knative/pkg/api/custom/knative/knative_clusteringress_client.go +++ /dev/null @@ -1,165 +0,0 @@ -package knative - -import ( - "context" - "fmt" - "sort" - - "github.com/solo-io/go-utils/contextutils" - - "github.com/solo-io/gloo/projects/knative/api/external/knative" - v1alpha1 "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" - knativeclient "knative.dev/networking/pkg/client/clientset/versioned" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" -) - -type ResourceClient struct { - knativeClient knativeclient.Interface - cache Cache -} - -func NewResourceClient(knativeClient knativeclient.Interface, cache Cache) *ResourceClient { - return &ResourceClient{ - knativeClient: knativeClient, - cache: cache, - } -} - -func FromKube(ci *knativev1alpha1.Ingress) *v1alpha1.Ingress { - deepCopy := ci.DeepCopy() - baseType := knative.Ingress(*deepCopy) - resource := &v1alpha1.Ingress{ - Ingress: baseType, - } - - return resource -} - -func ToKube(resource resources.Resource) (*knativev1alpha1.Ingress, error) { - ingressResource, ok := resource.(*v1alpha1.Ingress) - if !ok { - return nil, errors.Errorf("internal error: invalid resource %v passed to ingress client", resources.Kind(resource)) - } - - ingress := knativev1alpha1.Ingress(ingressResource.Ingress) - - return &ingress, nil -} - -var _ clients.ResourceClient = &ResourceClient{} - -func (rc *ResourceClient) Kind() string { - return resources.Kind(&v1alpha1.Ingress{}) -} - -func (rc *ResourceClient) NewResource() resources.Resource { - return resources.Clone(&v1alpha1.Ingress{}) -} - -func (rc *ResourceClient) Register() error { - return nil -} - -func (rc *ResourceClient) Read(namespace, name string, opts clients.ReadOpts) (resources.Resource, error) { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support read operations") - return nil, fmt.Errorf("this client does not support read operations") -} - -func (rc *ResourceClient) Write(resource resources.Resource, opts clients.WriteOpts) (resources.Resource, error) { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support write operations") - return nil, fmt.Errorf("this client does not support write operations") -} - -func (rc *ResourceClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support delete operations") - return fmt.Errorf("this client does not support delete operations") -} - -func (rc *ResourceClient) ApplyStatus(statusClient resources.StatusClient, inputResource resources.InputResource, opts clients.ApplyStatusOpts) (resources.Resource, error) { - contextutils.LoggerFrom(context.Background()).DPanic("this client does not support apply status operations") - return 
nil, fmt.Errorf("this client does not support apply status operations") -} - -func (rc *ResourceClient) List(namespace string, opts clients.ListOpts) (resources.ResourceList, error) { - opts = opts.WithDefaults() - - ingressObjList, err := rc.cache.IngressLister().Ingresses(namespace).List(labels.SelectorFromSet(opts.Selector)) - if err != nil { - return nil, errors.Wrapf(err, "listing Ingresses") - } - var resourceList resources.ResourceList - for _, IngressObj := range ingressObjList { - resource := FromKube(IngressObj) - - if resource == nil { - continue - } - resourceList = append(resourceList, resource) - } - - sort.SliceStable(resourceList, func(i, j int) bool { - return resourceList[i].GetMetadata().GetName() < resourceList[j].GetMetadata().GetName() - }) - - return resourceList, nil -} - -func (rc *ResourceClient) Watch(namespace string, opts clients.WatchOpts) (<-chan resources.ResourceList, <-chan error, error) { - opts = opts.WithDefaults() - watch := rc.cache.Subscribe() - - resourcesChan := make(chan resources.ResourceList) - errs := make(chan error) - // prevent flooding the channel with duplicates - var previous *resources.ResourceList - updateResourceList := func() { - list, err := rc.List(namespace, clients.ListOpts{ - Ctx: opts.Ctx, - Selector: opts.Selector, - }) - if err != nil { - errs <- err - return - } - if previous != nil { - if list.Equal(*previous) { - return - } - } - previous = &list - resourcesChan <- list - } - - go func() { - defer rc.cache.Unsubscribe(watch) - defer close(resourcesChan) - defer close(errs) - - // watch should open up with an initial read - updateResourceList() - for { - select { - case _, ok := <-watch: - if !ok { - return - } - updateResourceList() - case <-opts.Ctx.Done(): - return - } - } - }() - - return resourcesChan, errs, nil -} - -func (rc *ResourceClient) exist(ctx context.Context, namespace, name string) bool { - _, err := rc.knativeClient.NetworkingV1alpha1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) - return err == nil -} diff --git a/projects/knative/pkg/api/external/knative/ingress.sk.go b/projects/knative/pkg/api/external/knative/ingress.sk.go deleted file mode 100644 index 77fb5316fd4..00000000000 --- a/projects/knative/pkg/api/external/knative/ingress.sk.go +++ /dev/null @@ -1,159 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "encoding/binary" - "hash" - "hash/fnv" - "log" - "sort" - - github_com_solo_io_gloo_projects_knative_api_external_knative "github.com/solo-io/gloo/projects/knative/api/external/knative" - - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "github.com/solo-io/solo-kit/pkg/errors" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // Compile-time assertion - _ resources.Resource = new(Ingress) -) - -func NewIngressHashableResource() resources.HashableResource { - return new(Ingress) -} - -func NewIngress(namespace, name string) *Ingress { - ingress := &Ingress{} - ingress.Ingress.SetMetadata(&core.Metadata{ - Name: name, - Namespace: namespace, - }) - return ingress -} - -// require custom resource to implement Clone() as well as resources.Resource interface - -type CloneableIngress interface { - resources.Resource - Clone() *github_com_solo_io_gloo_projects_knative_api_external_knative.Ingress -} - -var _ CloneableIngress = &github_com_solo_io_gloo_projects_knative_api_external_knative.Ingress{} - -type Ingress struct { - github_com_solo_io_gloo_projects_knative_api_external_knative.Ingress -} - -func (r *Ingress) Clone() resources.Resource { - return &Ingress{Ingress: *r.Ingress.Clone()} -} - -func (r *Ingress) Hash(hasher hash.Hash64) (uint64, error) { - if hasher == nil { - hasher = fnv.New64() - } - clone := r.Ingress.Clone() - resources.UpdateMetadata(clone, func(meta *core.Metadata) { - meta.ResourceVersion = "" - }) - err := binary.Write(hasher, binary.LittleEndian, hashutils.HashAll(clone)) - if err != nil { - return 0, err - } - return hasher.Sum64(), nil -} - -func (r *Ingress) MustHash() uint64 { - hashVal, err := r.Hash(nil) - if err != nil { - log.Panicf("error while hashing: (%s) this should never happen", err) - } - return hashVal -} - -func (r *Ingress) GroupVersionKind() schema.GroupVersionKind { - return IngressGVK -} - -type IngressList []*Ingress - -func (list IngressList) Find(namespace, name string) (*Ingress, error) { - for _, ingress := range list { - if ingress.GetMetadata().Name == name && ingress.GetMetadata().Namespace == namespace { - return ingress, nil - } - } - return nil, errors.Errorf("list did not find ingress %v.%v", namespace, name) -} - -func (list IngressList) AsResources() resources.ResourceList { - var ress resources.ResourceList - for _, ingress := range list { - ress = append(ress, ingress) - } - return ress -} - -func (list IngressList) Names() []string { - var names []string - for _, ingress := range list { - names = append(names, ingress.GetMetadata().Name) - } - return names -} - -func (list IngressList) NamespacesDotNames() []string { - var names []string - for _, ingress := range list { - names = append(names, ingress.GetMetadata().Namespace+"."+ingress.GetMetadata().Name) - } - return names -} - -func (list IngressList) Sort() IngressList { - sort.SliceStable(list, func(i, j int) bool { - return list[i].GetMetadata().Less(list[j].GetMetadata()) - }) - return list -} - -func (list IngressList) Clone() IngressList { - var ingressList IngressList - for _, ingress := range list { - ingressList = append(ingressList, resources.Clone(ingress).(*Ingress)) - } - return ingressList -} - -func (list IngressList) Each(f func(element *Ingress)) { - for _, ingress := range list { - f(ingress) - } -} - -func (list IngressList) EachResource(f func(element resources.Resource)) { - for _, ingress := range list { - 
f(ingress) - } -} - -func (list IngressList) AsInterfaces() []interface{} { - var asInterfaces []interface{} - list.Each(func(element *Ingress) { - asInterfaces = append(asInterfaces, element) - }) - return asInterfaces -} - -var ( - IngressGVK = schema.GroupVersionKind{ - Version: "v1alpha1", - Group: "networking.internal.knative.dev", - Kind: "Ingress", - } -) diff --git a/projects/knative/pkg/api/external/knative/ingress_client.sk.go b/projects/knative/pkg/api/external/knative/ingress_client.sk.go deleted file mode 100644 index bb38024ee61..00000000000 --- a/projects/knative/pkg/api/external/knative/ingress_client.sk.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/errors" -) - -type IngressWatcher interface { - // watch namespace-scoped ingresses - Watch(namespace string, opts clients.WatchOpts) (<-chan IngressList, <-chan error, error) -} - -type IngressClient interface { - BaseClient() clients.ResourceClient - Register() error - Read(namespace, name string, opts clients.ReadOpts) (*Ingress, error) - Write(resource *Ingress, opts clients.WriteOpts) (*Ingress, error) - Delete(namespace, name string, opts clients.DeleteOpts) error - List(namespace string, opts clients.ListOpts) (IngressList, error) - IngressWatcher -} - -type ingressClient struct { - rc clients.ResourceClient -} - -func NewIngressClient(ctx context.Context, rcFactory factory.ResourceClientFactory) (IngressClient, error) { - return NewIngressClientWithToken(ctx, rcFactory, "") -} - -func NewIngressClientWithToken(ctx context.Context, rcFactory factory.ResourceClientFactory, token string) (IngressClient, error) { - rc, err := rcFactory.NewResourceClient(ctx, factory.NewResourceClientParams{ - ResourceType: &Ingress{}, - Token: token, - }) - if err != nil { - return nil, errors.Wrapf(err, "creating base Ingress resource client") - } - return NewIngressClientWithBase(rc), nil -} - -func NewIngressClientWithBase(rc clients.ResourceClient) IngressClient { - return &ingressClient{ - rc: rc, - } -} - -func (client *ingressClient) BaseClient() clients.ResourceClient { - return client.rc -} - -func (client *ingressClient) Register() error { - return client.rc.Register() -} - -func (client *ingressClient) Read(namespace, name string, opts clients.ReadOpts) (*Ingress, error) { - opts = opts.WithDefaults() - - resource, err := client.rc.Read(namespace, name, opts) - if err != nil { - return nil, err - } - return resource.(*Ingress), nil -} - -func (client *ingressClient) Write(ingress *Ingress, opts clients.WriteOpts) (*Ingress, error) { - opts = opts.WithDefaults() - resource, err := client.rc.Write(ingress, opts) - if err != nil { - return nil, err - } - return resource.(*Ingress), nil -} - -func (client *ingressClient) Delete(namespace, name string, opts clients.DeleteOpts) error { - opts = opts.WithDefaults() - - return client.rc.Delete(namespace, name, opts) -} - -func (client *ingressClient) List(namespace string, opts clients.ListOpts) (IngressList, error) { - opts = opts.WithDefaults() - - resourceList, err := client.rc.List(namespace, opts) - if err != nil { - return nil, err - } - return convertToIngress(resourceList), nil -} - -func (client *ingressClient) Watch(namespace string, opts clients.WatchOpts) (<-chan IngressList, <-chan error, error) { - 
opts = opts.WithDefaults() - - resourcesChan, errs, initErr := client.rc.Watch(namespace, opts) - if initErr != nil { - return nil, nil, initErr - } - ingressesChan := make(chan IngressList) - go func() { - for { - select { - case resourceList := <-resourcesChan: - select { - case ingressesChan <- convertToIngress(resourceList): - case <-opts.Ctx.Done(): - close(ingressesChan) - return - } - case <-opts.Ctx.Done(): - close(ingressesChan) - return - } - } - }() - return ingressesChan, errs, nil -} - -func convertToIngress(resources resources.ResourceList) IngressList { - var ingressList IngressList - for _, resource := range resources { - ingressList = append(ingressList, resource.(*Ingress)) - } - return ingressList -} diff --git a/projects/knative/pkg/api/external/knative/ingress_reconciler.sk.go b/projects/knative/pkg/api/external/knative/ingress_reconciler.sk.go deleted file mode 100644 index dca1dc7d147..00000000000 --- a/projects/knative/pkg/api/external/knative/ingress_reconciler.sk.go +++ /dev/null @@ -1,47 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1alpha1 - -import ( - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/reconcile" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" -) - -// Option to copy anything from the original to the desired before writing. Return value of false means don't update -type TransitionIngressFunc func(original, desired *Ingress) (bool, error) - -type IngressReconciler interface { - Reconcile(namespace string, desiredResources IngressList, transition TransitionIngressFunc, opts clients.ListOpts) error -} - -func ingresssToResources(list IngressList) resources.ResourceList { - var resourceList resources.ResourceList - for _, ingress := range list { - resourceList = append(resourceList, ingress) - } - return resourceList -} - -func NewIngressReconciler(client IngressClient, statusSetter resources.StatusSetter) IngressReconciler { - return &ingressReconciler{ - base: reconcile.NewReconciler(client.BaseClient(), statusSetter), - } -} - -type ingressReconciler struct { - base reconcile.Reconciler -} - -func (r *ingressReconciler) Reconcile(namespace string, desiredResources IngressList, transition TransitionIngressFunc, opts clients.ListOpts) error { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "ingress_reconciler") - var transitionResources reconcile.TransitionResourcesFunc - if transition != nil { - transitionResources = func(original, desired resources.Resource) (bool, error) { - return transition(original.(*Ingress), desired.(*Ingress)) - } - } - return r.base.Reconcile(namespace, ingresssToResources(desiredResources), transitionResources, opts) -} diff --git a/projects/knative/pkg/api/v1/translator_event_loop.sk.go b/projects/knative/pkg/api/v1/translator_event_loop.sk.go deleted file mode 100644 index a89514bc989..00000000000 --- a/projects/knative/pkg/api/v1/translator_event_loop.sk.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
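The generated reconciler above reduces to a diff-and-write loop driven by an optional transition callback (copy fields from the original, return false to skip the write). A minimal stand-alone sketch, with `resource` and `reconcile` as illustrative stand-ins for the solo-kit reconciler rather than its real API:

```go
package main

import "fmt"

// resource is a stand-in for a solo-kit resource keyed by namespace.name.
type resource struct {
	Key  string
	Spec string
}

// transitionFunc mirrors TransitionIngressFunc above: it may copy fields from
// the original into the desired resource and returns false to skip the write.
type transitionFunc func(original, desired *resource) (bool, error)

// reconcile brings the stored map in line with desired: create what is
// missing, update what changed (as decided by transition), delete the rest.
func reconcile(stored map[string]resource, desired []resource, transition transitionFunc) error {
	want := map[string]bool{}
	for _, d := range desired {
		want[d.Key] = true
		original, exists := stored[d.Key]
		if !exists {
			stored[d.Key] = d
			continue
		}
		update := true
		if transition != nil {
			var err error
			if update, err = transition(&original, &d); err != nil {
				return err
			}
		}
		if update {
			stored[d.Key] = d
		}
	}
	for key := range stored {
		if !want[key] {
			delete(stored, key)
		}
	}
	return nil
}

func main() {
	stored := map[string]resource{
		"gloo-system.ingress-proxy": {Key: "gloo-system.ingress-proxy", Spec: "old"},
		"gloo-system.stale-proxy":   {Key: "gloo-system.stale-proxy", Spec: "old"},
	}
	desired := []resource{{Key: "gloo-system.ingress-proxy", Spec: "new"}}

	// only write when the spec actually changed
	onlyIfChanged := func(original, desired *resource) (bool, error) {
		return original.Spec != desired.Spec, nil
	}
	if err := reconcile(stored, desired, onlyIfChanged); err != nil {
		panic(err)
	}
	fmt.Println(stored) // stale-proxy deleted, ingress-proxy updated to "new"
}
```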
- -package v1 - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -var ( - mTranslatorSnapshotTimeSec = stats.Float64("translator.knative.gloo.solo.io/sync/time_sec", "The time taken for a given sync", "1") - mTranslatorSnapshotTimeSecView = &view.View{ - Name: "translator.knative.gloo.solo.io/sync/time_sec", - Description: "The time taken for a given sync", - TagKeys: []tag.Key{tag.MustNewKey("syncer_name")}, - Measure: mTranslatorSnapshotTimeSec, - Aggregation: view.Distribution(0.01, 0.05, 0.1, 0.25, 0.5, 1, 5, 10, 60), - } -) - -func init() { - view.Register( - mTranslatorSnapshotTimeSecView, - ) -} - -type TranslatorSyncer interface { - Sync(context.Context, *TranslatorSnapshot) error -} - -type TranslatorSyncers []TranslatorSyncer - -func (s TranslatorSyncers) Sync(ctx context.Context, snapshot *TranslatorSnapshot) error { - var multiErr *multierror.Error - for _, syncer := range s { - if err := syncer.Sync(ctx, snapshot); err != nil { - multiErr = multierror.Append(multiErr, err) - } - } - return multiErr.ErrorOrNil() -} - -type translatorEventLoop struct { - emitter TranslatorSnapshotEmitter - syncer TranslatorSyncer - ready chan struct{} -} - -func NewTranslatorEventLoop(emitter TranslatorSnapshotEmitter, syncer TranslatorSyncer) eventloop.EventLoop { - return &translatorEventLoop{ - emitter: emitter, - syncer: syncer, - ready: make(chan struct{}), - } -} - -func (el *translatorEventLoop) Ready() <-chan struct{} { - return el.ready -} - -func (el *translatorEventLoop) Run(namespaces []string, opts clients.WatchOpts) (<-chan error, error) { - opts = opts.WithDefaults() - opts.Ctx = contextutils.WithLogger(opts.Ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(opts.Ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(namespaces, opts) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - go errutils.AggregateErrs(opts.Ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - var channelClosed bool - - // create a new context for each loop, cancel it before each loop - var cancel context.CancelFunc = func() {} - - // use closure to allow cancel function to be updated as context changes - defer func() { cancel() }() - - // cache the previous snapshot for comparison - var previousSnapshot *TranslatorSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - if syncDecider, isDecider := el.syncer.(TranslatorSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, isDecider := el.syncer.(TranslatorSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(opts.Ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // cancel any open watches from previous loop - cancel() - - startTime := time.Now() - ctx, span := trace.StartSpan(opts.Ctx, "translator.knative.gloo.solo.io.EventLoopSync") - ctx, canc := 
context.WithCancel(ctx) - cancel = canc - err := el.syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", el.syncer)), - }, - mTranslatorSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } else if !channelClosed { - channelClosed = true - close(el.ready) - } - - previousSnapshot = snapshot - - case <-opts.Ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/knative/pkg/api/v1/translator_simple_event_loop.sk.go b/projects/knative/pkg/api/v1/translator_simple_event_loop.sk.go deleted file mode 100644 index c6659d2dc1b..00000000000 --- a/projects/knative/pkg/api/v1/translator_simple_event_loop.sk.go +++ /dev/null @@ -1,134 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/eventloop" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" -) - -// SyncDeciders Syncer which implements this interface -// can make smarter decisions over whether -// it should be restarted (including having its context cancelled) -// based on a diff of the previous and current snapshot - -// Deprecated: use TranslatorSyncDeciderWithContext -type TranslatorSyncDecider interface { - TranslatorSyncer - ShouldSync(old, new *TranslatorSnapshot) bool -} - -type TranslatorSyncDeciderWithContext interface { - TranslatorSyncer - ShouldSync(ctx context.Context, old, new *TranslatorSnapshot) bool -} - -type translatorSimpleEventLoop struct { - emitter TranslatorSimpleEmitter - syncers []TranslatorSyncer -} - -func NewTranslatorSimpleEventLoop(emitter TranslatorSimpleEmitter, syncers ...TranslatorSyncer) eventloop.SimpleEventLoop { - return &translatorSimpleEventLoop{ - emitter: emitter, - syncers: syncers, - } -} - -func (el *translatorSimpleEventLoop) Run(ctx context.Context) (<-chan error, error) { - ctx = contextutils.WithLogger(ctx, "v1.event_loop") - logger := contextutils.LoggerFrom(ctx) - logger.Infof("event loop started") - - errs := make(chan error) - - watch, emitterErrs, err := el.emitter.Snapshots(ctx) - if err != nil { - return nil, errors.Wrapf(err, "starting snapshot watch") - } - - go errutils.AggregateErrs(ctx, errs, emitterErrs, "v1.emitter errors") - go func() { - // create a new context for each syncer for each loop, cancel each before each loop - syncerCancels := make(map[TranslatorSyncer]context.CancelFunc) - - // use closure to allow cancel function to be updated as context changes - defer func() { - for _, cancel := range syncerCancels { - cancel() - } - }() - - // cache the previous snapshot for comparison - var previousSnapshot *TranslatorSnapshot - - for { - select { - case snapshot, ok := <-watch: - if !ok { - return - } - - // cancel any open watches from previous loop - for _, syncer := range el.syncers { - // allow the syncer to decide if we should sync it + cancel its previous context - if syncDecider, isDecider := syncer.(TranslatorSyncDecider); isDecider { - if shouldSync := syncDecider.ShouldSync(previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } else if syncDeciderWithContext, 
isDecider := syncer.(TranslatorSyncDeciderWithContext); isDecider { - if shouldSync := syncDeciderWithContext.ShouldSync(ctx, previousSnapshot, snapshot); !shouldSync { - continue // skip syncing this syncer - } - } - - // if this syncer had a previous context, cancel it - cancel, ok := syncerCancels[syncer] - if ok { - cancel() - } - - startTime := time.Now() - ctx, span := trace.StartSpan(ctx, fmt.Sprintf("translator.knative.gloo.solo.io.SimpleEventLoopSync-%T", syncer)) - ctx, canc := context.WithCancel(ctx) - err := syncer.Sync(ctx, snapshot) - stats.RecordWithTags( - ctx, - []tag.Mutator{ - tag.Insert(skstats.SyncerNameKey, fmt.Sprintf("%T", syncer)), - }, - mTranslatorSnapshotTimeSec.M(time.Now().Sub(startTime).Seconds()), - ) - span.End() - - if err != nil { - select { - case errs <- err: - default: - logger.Errorf("write error channel is full! could not propagate err: %v", err) - } - } - - syncerCancels[syncer] = canc - } - - previousSnapshot = snapshot - - case <-ctx.Done(): - return - } - } - }() - return errs, nil -} diff --git a/projects/knative/pkg/api/v1/translator_snapshot.sk.go b/projects/knative/pkg/api/v1/translator_snapshot.sk.go deleted file mode 100644 index 42f91e47d9a..00000000000 --- a/projects/knative/pkg/api/v1/translator_snapshot.sk.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "fmt" - "hash" - "hash/fnv" - "log" - - github_com_solo_io_gloo_projects_knative_pkg_api_external_knative "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - - "github.com/rotisserie/eris" - "github.com/solo-io/go-utils/hashutils" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - "go.uber.org/zap" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type TranslatorSnapshot struct { - Ingresses github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressList -} - -func (s TranslatorSnapshot) Clone() TranslatorSnapshot { - return TranslatorSnapshot{ - Ingresses: s.Ingresses.Clone(), - } -} - -func (s TranslatorSnapshot) Hash(hasher hash.Hash64) (uint64, error) { - if hasher == nil { - hasher = fnv.New64() - } - if _, err := s.hashIngresses(hasher); err != nil { - return 0, err - } - return hasher.Sum64(), nil -} - -func (s TranslatorSnapshot) hashIngresses(hasher hash.Hash64) (uint64, error) { - return hashutils.HashAllSafe(hasher, s.Ingresses.AsInterfaces()...) 
-} - -func (s TranslatorSnapshot) HashFields() []zap.Field { - var fields []zap.Field - hasher := fnv.New64() - IngressesHash, err := s.hashIngresses(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - fields = append(fields, zap.Uint64("ingresses", IngressesHash)) - snapshotHash, err := s.Hash(hasher) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return append(fields, zap.Uint64("snapshotHash", snapshotHash)) -} - -func (s *TranslatorSnapshot) GetResourcesList(resource resources.Resource) (resources.ResourceList, error) { - switch resource.(type) { - case *github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.Ingress: - return s.Ingresses.AsResources(), nil - default: - return resources.ResourceList{}, eris.New("did not contain the input resource type returning empty list") - } -} - -func (s *TranslatorSnapshot) RemoveFromResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch resource.(type) { - case *github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.Ingress: - - for i, res := range s.Ingresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Ingresses = append(s.Ingresses[:i], s.Ingresses[i+1:]...) - break - } - } - return nil - default: - return eris.Errorf("did not remove the resource because its type does not exist [%T]", resource) - } -} - -func (s *TranslatorSnapshot) RemoveMatches(predicate core.Predicate) { - var Ingresses github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressList - for _, res := range s.Ingresses { - if matches := predicate(res.GetMetadata()); !matches { - Ingresses = append(Ingresses, res) - } - } - s.Ingresses = Ingresses -} - -func (s *TranslatorSnapshot) UpsertToResourceList(resource resources.Resource) error { - refKey := resource.GetMetadata().Ref().Key() - switch typed := resource.(type) { - case *github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.Ingress: - updated := false - for i, res := range s.Ingresses { - if refKey == res.GetMetadata().Ref().Key() { - s.Ingresses[i] = typed - updated = true - } - } - if !updated { - s.Ingresses = append(s.Ingresses, typed) - } - s.Ingresses.Sort() - return nil - default: - return eris.Errorf("did not add/replace the resource type because it does not exist %T", resource) - } -} - -type TranslatorSnapshotStringer struct { - Version uint64 - Ingresses []string -} - -func (ss TranslatorSnapshotStringer) String() string { - s := fmt.Sprintf("TranslatorSnapshot %v\n", ss.Version) - - s += fmt.Sprintf(" Ingresses %v\n", len(ss.Ingresses)) - for _, name := range ss.Ingresses { - s += fmt.Sprintf(" %v\n", name) - } - - return s -} - -func (s TranslatorSnapshot) Stringer() TranslatorSnapshotStringer { - snapshotHash, err := s.Hash(nil) - if err != nil { - log.Println(eris.Wrapf(err, "error hashing, this should never happen")) - } - return TranslatorSnapshotStringer{ - Version: snapshotHash, - Ingresses: s.Ingresses.NamespacesDotNames(), - } -} - -var TranslatorGvkToHashableResource = map[schema.GroupVersionKind]func() resources.HashableResource{ - github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressGVK: github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.NewIngressHashableResource, -} diff --git a/projects/knative/pkg/api/v1/translator_snapshot_emitter.sk.go b/projects/knative/pkg/api/v1/translator_snapshot_emitter.sk.go deleted file mode 100644 index 
d3e5e940726..00000000000 --- a/projects/knative/pkg/api/v1/translator_snapshot_emitter.sk.go +++ /dev/null @@ -1,261 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. - -package v1 - -import ( - "sync" - "time" - - github_com_solo_io_gloo_projects_knative_pkg_api_external_knative "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/errors" - skstats "github.com/solo-io/solo-kit/pkg/stats" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" -) - -var ( - // Deprecated. See mTranslatorResourcesIn - mTranslatorSnapshotIn = stats.Int64("translator.knative.gloo.solo.io/emitter/snap_in", "Deprecated. Use translator.knative.gloo.solo.io/emitter/resources_in. The number of snapshots in", "1") - - // metrics for emitter - mTranslatorResourcesIn = stats.Int64("translator.knative.gloo.solo.io/emitter/resources_in", "The number of resource lists received on open watch channels", "1") - mTranslatorSnapshotOut = stats.Int64("translator.knative.gloo.solo.io/emitter/snap_out", "The number of snapshots out", "1") - mTranslatorSnapshotMissed = stats.Int64("translator.knative.gloo.solo.io/emitter/snap_missed", "The number of snapshots missed", "1") - - // views for emitter - // deprecated: see translatorResourcesInView - translatorsnapshotInView = &view.View{ - Name: "translator.knative.gloo.solo.io/emitter/snap_in", - Measure: mTranslatorSnapshotIn, - Description: "Deprecated. Use translator.knative.gloo.solo.io/emitter/resources_in. The number of snapshots updates coming in.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - - translatorResourcesInView = &view.View{ - Name: "translator.knative.gloo.solo.io/emitter/resources_in", - Measure: mTranslatorResourcesIn, - Description: "The number of resource lists received on open watch channels", - Aggregation: view.Count(), - TagKeys: []tag.Key{ - skstats.NamespaceKey, - skstats.ResourceKey, - }, - } - translatorsnapshotOutView = &view.View{ - Name: "translator.knative.gloo.solo.io/emitter/snap_out", - Measure: mTranslatorSnapshotOut, - Description: "The number of snapshots updates going out", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } - translatorsnapshotMissedView = &view.View{ - Name: "translator.knative.gloo.solo.io/emitter/snap_missed", - Measure: mTranslatorSnapshotMissed, - Description: "The number of snapshots updates going missed. this can happen in heavy load. 
missed snapshot will be re-tried after a second.", - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - } -) - -func init() { - view.Register( - translatorsnapshotInView, - translatorsnapshotOutView, - translatorsnapshotMissedView, - translatorResourcesInView, - ) -} - -type TranslatorSnapshotEmitter interface { - Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TranslatorSnapshot, <-chan error, error) -} - -type TranslatorEmitter interface { - TranslatorSnapshotEmitter - Register() error - Ingress() github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressClient -} - -func NewTranslatorEmitter(ingressClient github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressClient) TranslatorEmitter { - return NewTranslatorEmitterWithEmit(ingressClient, make(chan struct{})) -} - -func NewTranslatorEmitterWithEmit(ingressClient github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressClient, emit <-chan struct{}) TranslatorEmitter { - return &translatorEmitter{ - ingress: ingressClient, - forceEmit: emit, - } -} - -type translatorEmitter struct { - forceEmit <-chan struct{} - ingress github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressClient -} - -func (c *translatorEmitter) Register() error { - if err := c.ingress.Register(); err != nil { - return err - } - return nil -} - -func (c *translatorEmitter) Ingress() github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressClient { - return c.ingress -} - -func (c *translatorEmitter) Snapshots(watchNamespaces []string, opts clients.WatchOpts) (<-chan *TranslatorSnapshot, <-chan error, error) { - - if len(watchNamespaces) == 0 { - watchNamespaces = []string{""} - } - - for _, ns := range watchNamespaces { - if ns == "" && len(watchNamespaces) > 1 { - return nil, nil, errors.Errorf("the \"\" namespace is used to watch all namespaces. Snapshots can either be tracked for " + - "specific namespaces or \"\" AllNamespaces, but not both.") - } - } - - errs := make(chan error) - var done sync.WaitGroup - ctx := opts.Ctx - /* Create channel for Ingress */ - type ingressListWithNamespace struct { - list github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressList - namespace string - } - ingressChan := make(chan ingressListWithNamespace) - - var initialIngressList github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressList - - currentSnapshot := TranslatorSnapshot{} - ingressesByNamespace := make(map[string]github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressList) - - for _, namespace := range watchNamespaces { - /* Setup namespaced watch for Ingress */ - { - ingresses, err := c.ingress.List(namespace, clients.ListOpts{Ctx: opts.Ctx, Selector: opts.Selector}) - if err != nil { - return nil, nil, errors.Wrapf(err, "initial Ingress list") - } - initialIngressList = append(initialIngressList, ingresses...) 
- ingressesByNamespace[namespace] = ingresses - } - ingressNamespacesChan, ingressErrs, err := c.ingress.Watch(namespace, opts) - if err != nil { - return nil, nil, errors.Wrapf(err, "starting Ingress watch") - } - - done.Add(1) - go func(namespace string) { - defer done.Done() - errutils.AggregateErrs(ctx, errs, ingressErrs, namespace+"-ingresses") - }(namespace) - - /* Watch for changes and update snapshot */ - go func(namespace string) { - for { - select { - case <-ctx.Done(): - return - case ingressList, ok := <-ingressNamespacesChan: - if !ok { - return - } - select { - case <-ctx.Done(): - return - case ingressChan <- ingressListWithNamespace{list: ingressList, namespace: namespace}: - } - } - } - }(namespace) - } - /* Initialize snapshot for Ingresses */ - currentSnapshot.Ingresses = initialIngressList.Sort() - - snapshots := make(chan *TranslatorSnapshot) - go func() { - // sent initial snapshot to kick off the watch - initialSnapshot := currentSnapshot.Clone() - snapshots <- &initialSnapshot - - timer := time.NewTicker(time.Second * 1) - previousHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - // this should never happen, so panic if it does - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - sentSnapshot := currentSnapshot.Clone() - select { - case snapshots <- &sentSnapshot: - stats.Record(ctx, mTranslatorSnapshotOut.M(1)) - previousHash = currentHash - default: - stats.Record(ctx, mTranslatorSnapshotMissed.M(1)) - } - } - - defer func() { - close(snapshots) - // we must wait for done before closing the error chan, - // to avoid sending on close channel. - done.Wait() - close(errs) - }() - for { - record := func() { stats.Record(ctx, mTranslatorSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case ingressNamespacedList, ok := <-ingressChan: - if !ok { - return - } - record() - - namespace := ingressNamespacedList.namespace - - skstats.IncrementResourceCount( - ctx, - namespace, - "ingress", - mTranslatorResourcesIn, - ) - - // merge lists by namespace - ingressesByNamespace[namespace] = ingressNamespacedList.list - var ingressList github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.IngressList - for _, ingresses := range ingressesByNamespace { - ingressList = append(ingressList, ingresses...) - } - currentSnapshot.Ingresses = ingressList.Sort() - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/knative/pkg/api/v1/translator_snapshot_simple_emitter.sk.go b/projects/knative/pkg/api/v1/translator_snapshot_simple_emitter.sk.go deleted file mode 100644 index 5a4c67d1d7a..00000000000 --- a/projects/knative/pkg/api/v1/translator_snapshot_simple_emitter.sk.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by solo-kit. DO NOT EDIT. 
- -package v1 - -import ( - "context" - "fmt" - "time" - - github_com_solo_io_gloo_projects_knative_pkg_api_external_knative "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - - "go.opencensus.io/stats" - "go.uber.org/zap" - - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/go-utils/errutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" -) - -type TranslatorSimpleEmitter interface { - Snapshots(ctx context.Context) (<-chan *TranslatorSnapshot, <-chan error, error) -} - -func NewTranslatorSimpleEmitter(aggregatedWatch clients.ResourceWatch) TranslatorSimpleEmitter { - return NewTranslatorSimpleEmitterWithEmit(aggregatedWatch, make(chan struct{})) -} - -func NewTranslatorSimpleEmitterWithEmit(aggregatedWatch clients.ResourceWatch, emit <-chan struct{}) TranslatorSimpleEmitter { - return &translatorSimpleEmitter{ - aggregatedWatch: aggregatedWatch, - forceEmit: emit, - } -} - -type translatorSimpleEmitter struct { - forceEmit <-chan struct{} - aggregatedWatch clients.ResourceWatch -} - -func (c *translatorSimpleEmitter) Snapshots(ctx context.Context) (<-chan *TranslatorSnapshot, <-chan error, error) { - snapshots := make(chan *TranslatorSnapshot) - errs := make(chan error) - - untyped, watchErrs, err := c.aggregatedWatch(ctx) - if err != nil { - return nil, nil, err - } - - go errutils.AggregateErrs(ctx, errs, watchErrs, "translator-emitter") - - go func() { - currentSnapshot := TranslatorSnapshot{} - timer := time.NewTicker(time.Second * 1) - var previousHash uint64 - sync := func() { - currentHash, err := currentSnapshot.Hash(nil) - if err != nil { - contextutils.LoggerFrom(ctx).Panicw("error while hashing, this should never happen", zap.Error(err)) - } - if previousHash == currentHash { - return - } - - previousHash = currentHash - - stats.Record(ctx, mTranslatorSnapshotOut.M(1)) - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - } - - defer func() { - close(snapshots) - close(errs) - }() - - for { - record := func() { stats.Record(ctx, mTranslatorSnapshotIn.M(1)) } - - select { - case <-timer.C: - sync() - case <-ctx.Done(): - return - case <-c.forceEmit: - sentSnapshot := currentSnapshot.Clone() - snapshots <- &sentSnapshot - case untypedList := <-untyped: - record() - - currentSnapshot = TranslatorSnapshot{} - for _, res := range untypedList { - switch typed := res.(type) { - case *github_com_solo_io_gloo_projects_knative_pkg_api_external_knative.Ingress: - currentSnapshot.Ingresses = append(currentSnapshot.Ingresses, typed) - default: - select { - case errs <- fmt.Errorf("TranslatorSnapshotEmitter "+ - "cannot process resource %v of type %T", res.GetMetadata().Ref(), res): - case <-ctx.Done(): - return - } - } - } - - } - } - }() - return snapshots, errs, nil -} diff --git a/projects/knative/pkg/translator/translate.go b/projects/knative/pkg/translator/translate.go deleted file mode 100644 index d914214aad5..00000000000 --- a/projects/knative/pkg/translator/translate.go +++ /dev/null @@ -1,303 +0,0 @@ -package translator - -import ( - "context" - "fmt" - "sort" - "strconv" - "strings" - - "github.com/golang/protobuf/ptypes/wrappers" - - envoycore_sk "github.com/solo-io/solo-kit/pkg/api/external/envoy/api/v2/core" - - "knative.dev/networking/pkg/apis/networking" - "knative.dev/pkg/network" - - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/core/matchers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/ssl" - "k8s.io/apimachinery/pkg/util/sets" - - v1alpha1 
"github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/headers" - - errors "github.com/rotisserie/eris" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/go-utils/log" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" -) - -const ( - ingressClassAnnotation = networking.IngressClassAnnotationKey - glooIngressClass = "gloo.ingress.networking.knative.dev" -) - -const ( - bindPortHttp = 8080 - bindPortHttps = 8443 - - // a comma-separated list of sni domains - sslAnnotationKeySniDomains = "gloo.networking.knative.dev/ssl.sni_domains" - // the name of the secret containing tls certs - sslAnnotationKeySecretName = "gloo.networking.knative.dev/ssl.secret_name" - // the namespace of the secret containing tls certs - // defaults to the ingress' namespace - sslAnnotationKeySecretNamespace = "gloo.networking.knative.dev/ssl.secret_namespace" -) - -func sslConfigFromAnnotations(annotations map[string]string, namespace string) *ssl.SslConfig { - secretName, ok := annotations[sslAnnotationKeySecretName] - if !ok { - return nil - } - - secretNamespace, ok := annotations[sslAnnotationKeySecretNamespace] - if !ok { - secretNamespace = namespace - } - - sniDomains := strings.Split(annotations[sslAnnotationKeySniDomains], ",") - - return &ssl.SslConfig{ - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &core.ResourceRef{ - Name: secretName, - Namespace: secretNamespace, - }, - }, - SniDomains: sniDomains, - } -} - -func translateProxy(ctx context.Context, proxyName, proxyNamespace string, ingresses v1alpha1.IngressList) (*gloov1.Proxy, error) { - // use map of *core.Metadata to support both Ingress and ClusterIngress, - // which share the same Spec type - ingressSpecsByRef := make(map[*core.Metadata]knativev1alpha1.IngressSpec) - for _, ing := range ingresses { - meta := ing.GetMetadata() - ingressSpecsByRef[meta] = ing.Spec - } - return TranslateProxyFromSpecs(ctx, proxyName, proxyNamespace, ingressSpecsByRef) -} - -// made public to be shared with the (soon to be deprecated) clusteringress controller -func TranslateProxyFromSpecs(ctx context.Context, proxyName, proxyNamespace string, ingresses map[*core.Metadata]knativev1alpha1.IngressSpec) (*gloov1.Proxy, error) { - virtualHostsHttp, virtualHostsHttps, sslConfigs, err := routingConfig(ctx, ingresses) - if err != nil { - return nil, errors.Wrapf(err, "computing virtual hosts") - } - var listeners []*gloov1.Listener - if len(virtualHostsHttp) > 0 { - listeners = append(listeners, &gloov1.Listener{ - Name: "http", - BindAddress: "::", - BindPort: bindPortHttp, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: virtualHostsHttp, - }, - }, - }) - } - if len(virtualHostsHttps) > 0 { - listeners = append(listeners, &gloov1.Listener{ - Name: "https", - BindAddress: "::", - BindPort: bindPortHttps, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: virtualHostsHttps, - }, - }, - SslConfigurations: sslConfigs, - }) - } - return &gloov1.Proxy{ - Metadata: &core.Metadata{ - Name: proxyName, // must match envoy role - Namespace: proxyNamespace, - }, - Listeners: listeners, - }, nil -} - -func routingConfig(_ context.Context, ingresses map[*core.Metadata]knativev1alpha1.IngressSpec) ([]*gloov1.VirtualHost, []*gloov1.VirtualHost, []*ssl.SslConfig, error) { - - var virtualHostsHttp, 
virtualHostsHttps []*gloov1.VirtualHost - var sslConfigs []*ssl.SslConfig - for ing, spec := range ingresses { - - for _, tls := range spec.TLS { - secretNamespace := tls.SecretNamespace - if secretNamespace == "" { - // default to namespace shared with ingress - secretNamespace = ing.GetNamespace() - } - - sslConfigs = append(sslConfigs, &ssl.SslConfig{ - SniDomains: tls.Hosts, - SslSecrets: &ssl.SslConfig_SecretRef{ - // pass secret through to gloo, - // allow Gloo to perform secret validation - SecretRef: &core.ResourceRef{ - Namespace: secretNamespace, - Name: tls.SecretName, - }, - }, - }) - } - - // use tls if spec contains tls, or user sets with annotations - useTls := len(spec.TLS) > 0 - - if customSsl := sslConfigFromAnnotations(ing.GetAnnotations(), ing.GetNamespace()); customSsl != nil { - useTls = true - sslConfigs = append(sslConfigs, customSsl) - } - - for i, rule := range spec.Rules { - var routes []*gloov1.Route - if rule.HTTP == nil { - log.Warnf("rule %v in knative ingress %v is missing HTTP field", i, ing.GetName()) - continue - } - for _, route := range rule.HTTP.Paths { - pathRegex := route.Path - if pathRegex == "" { - pathRegex = ".*" - } - - action, err := routeActionFromSplits(route.Splits) - if err != nil { - return nil, nil, nil, errors.Wrapf(err, "") - } - - route := &gloov1.Route{ - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: pathRegex, - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: action, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: getHeaderManipulation(route.AppendHeaders), - }, - } - routes = append(routes, route) - - } - - var hosts []string - for _, host := range expandHosts(rule.Hosts) { - hosts = append(hosts, host) - if useTls { - hosts = append(hosts, fmt.Sprintf("%v:%v", host, bindPortHttps)) - } else { - hosts = append(hosts, fmt.Sprintf("%v:%v", host, bindPortHttp)) - } - } - - vh := &gloov1.VirtualHost{ - Name: ing.Ref().Key() + "-" + strconv.Itoa(i), - Domains: hosts, - Routes: routes, - } - - if useTls { - virtualHostsHttps = append(virtualHostsHttps, vh) - } else { - virtualHostsHttp = append(virtualHostsHttp, vh) - } - } - } - - sort.SliceStable(virtualHostsHttp, func(i, j int) bool { - return virtualHostsHttp[i].GetName() < virtualHostsHttp[j].GetName() - }) - sort.SliceStable(virtualHostsHttps, func(i, j int) bool { - return virtualHostsHttps[i].GetName() < virtualHostsHttps[j].GetName() - }) - return virtualHostsHttp, virtualHostsHttps, sslConfigs, nil -} - -func routeActionFromSplits(splits []knativev1alpha1.IngressBackendSplit) (*gloov1.RouteAction, error) { - switch len(splits) { - case 0: - return nil, errors.Errorf("invalid cluster ingress: must provide at least 1 split") - } - - var destinations []*gloov1.WeightedDestination - for _, split := range splits { - var weightedDestinationPlugins *gloov1.WeightedDestinationOptions - if headerManipulaion := getHeaderManipulation(split.AppendHeaders); headerManipulaion != nil { - weightedDestinationPlugins = &gloov1.WeightedDestinationOptions{ - HeaderManipulation: headerManipulaion, - } - } - weight := uint32(split.Percent) - if len(splits) == 1 { - weight = 100 - } - destinations = append(destinations, &gloov1.WeightedDestination{ - Destination: &gloov1.Destination{ - DestinationType: serviceForSplit(split), - }, - Weight: &wrappers.UInt32Value{Value: weight}, - Options: weightedDestinationPlugins, - }) - } - return &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - 
Destinations: destinations, - }, - }, - }, nil -} - -func serviceForSplit(split knativev1alpha1.IngressBackendSplit) *gloov1.Destination_Kube { - return &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{Name: split.ServiceName, Namespace: split.ServiceNamespace}, - Port: uint32(split.ServicePort.IntValue()), - }, - } -} - -func getHeaderManipulation(headersToAppend map[string]string) *headers.HeaderManipulation { - if len(headersToAppend) == 0 { - return nil - } - var headersToAdd []*envoycore_sk.HeaderValueOption - for name, value := range headersToAppend { - headersToAdd = append(headersToAdd, &envoycore_sk.HeaderValueOption{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: name, Value: value}}}) - } - return &headers.HeaderManipulation{ - RequestHeadersToAdd: headersToAdd, - } -} - -// trim kube dns suffixes -// undocumented requirement -// see https://github.com/knative/serving/blob/main/pkg/reconciler/ingress/resources/virtual_service.go#L281 -func expandHosts(hosts []string) []string { - expanded := sets.NewString() - allowedSuffixes := []string{ - "", - "." + network.GetClusterDomainName(), - ".svc." + network.GetClusterDomainName(), - } - for _, h := range hosts { - for _, suffix := range allowedSuffixes { - if strings.HasSuffix(h, suffix) { - expanded.Insert(strings.TrimSuffix(h, suffix)) - } - } - } - - return expanded.List() -} diff --git a/projects/knative/pkg/translator/translate_test.go b/projects/knative/pkg/translator/translate_test.go deleted file mode 100644 index 1aab8dc6c1a..00000000000 --- a/projects/knative/pkg/translator/translate_test.go +++ /dev/null @@ -1,399 +0,0 @@ -package translator - -import ( - "context" - "time" - - "github.com/golang/protobuf/ptypes/wrappers" - - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/duration" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/core/matchers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/headers" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/ssl" - "github.com/solo-io/gloo/projects/knative/api/external/knative" - v1alpha12 "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - v1 "github.com/solo-io/gloo/projects/knative/pkg/api/v1" - envoycore_sk "github.com/solo-io/solo-kit/pkg/api/external/envoy/api/v2/core" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - . 
"github.com/solo-io/solo-kit/test/matchers" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "knative.dev/networking/pkg/apis/networking/v1alpha1" -) - -var _ = Describe("Translate", func() { - It("creates the appropriate proxy object for the provided ingress objects", func() { - namespace := "example" - serviceName := "peteszah-service" - serviceNamespace := "peteszah-service-namespace" - servicePort := int32(8080) - secretName := "areallygreatsecret" - ingress := &v1alpha1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing", - Namespace: namespace, - }, - Spec: v1alpha1.IngressSpec{ - Rules: []v1alpha1.IngressRule{ - { - Hosts: []string{"petes.com", "zah.net", "mysvc.myns.svc.cluster.local", "mysvc.myns.example.com"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - AppendHeaders: map[string]string{"add": "me"}, - }, - }, - }, - }, - { - Hosts: []string{"pog.com", "champ.net", "zah.net"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/hay", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - AppendHeaders: map[string]string{"add": "me"}, - }, - }, - }, - }, - }, - }, - } - ingressTls := &v1alpha1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ing-tls", - Namespace: namespace, - }, - Spec: v1alpha1.IngressSpec{ - TLS: []v1alpha1.IngressTLS{ - { - Hosts: []string{"petes.com"}, - SecretName: secretName, - }, - }, - Rules: []v1alpha1.IngressRule{ - { - Hosts: []string{"petes.com", "zah.net"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - AppendHeaders: map[string]string{"add": "me"}, - }, - }, - }, - }, - }, - }, - } - ingressRes := &v1alpha12.Ingress{Ingress: knative.Ingress(*ingress)} - ingressResTls := &v1alpha12.Ingress{Ingress: knative.Ingress(*ingressTls)} - snap := &v1.TranslatorSnapshot{ - Ingresses: v1alpha12.IngressList{ingressRes, ingressResTls}, - } - proxy, errs := translateProxy(context.TODO(), "test", namespace, snap.Ingresses) - Expect(errs).NotTo(HaveOccurred()) - Expect(proxy.Listeners).To(HaveLen(2)) - Expect(proxy.Listeners[0].Name).To(Equal("http")) - Expect(proxy.Listeners[0].BindPort).To(Equal(uint32(8080))) - - expected := &gloov1.Proxy{ - Listeners: []*gloov1.Listener{ - { - Name: "http", - BindAddress: "::", - BindPort: 8080, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: []*gloov1.VirtualHost{ - { - Name: "example.ing-0", - Domains: []string{ - "mysvc.myns", - "mysvc.myns:8080", - "mysvc.myns.example.com", - "mysvc.myns.example.com:8080", - "mysvc.myns.svc", - "mysvc.myns.svc:8080", - "mysvc.myns.svc.cluster.local", - "mysvc.myns.svc.cluster.local:8080", - "petes.com", - 
"petes.com:8080", - "zah.net", - "zah.net:8080", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - Destinations: []*gloov1.WeightedDestination{ - { - Destination: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{ - Name: "peteszah-service", - Namespace: "peteszah-service-namespace", - }, - Port: 8080, - }, - }, - }, - Weight: &wrappers.UInt32Value{Value: 0x00000064}, - }, - }, - }, - }, - }, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: &headers.HeaderManipulation{ - RequestHeadersToAdd: []*envoycore_sk.HeaderValueOption{{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: "add", Value: "me"}}}}, - }, - }, - }, - }, - }, - { - Name: "example.ing-1", - Domains: []string{ - "champ.net", - "champ.net:8080", - "pog.com", - "pog.com:8080", - "zah.net", - "zah.net:8080", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/hay", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - Destinations: []*gloov1.WeightedDestination{ - { - Destination: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{ - Name: "peteszah-service", - Namespace: "peteszah-service-namespace", - }, - Port: 8080, - }, - }, - }, - Weight: &wrappers.UInt32Value{Value: 0x00000064}, - }, - }, - }, - }, - }, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: &headers.HeaderManipulation{ - RequestHeadersToAdd: []*envoycore_sk.HeaderValueOption{{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: "add", Value: "me"}}}}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - Name: "https", - BindAddress: "::", - BindPort: 8443, - ListenerType: &gloov1.Listener_HttpListener{ - HttpListener: &gloov1.HttpListener{ - VirtualHosts: []*gloov1.VirtualHost{ - { - Name: "example.ing-tls-0", - Domains: []string{ - "petes.com", - "petes.com:8443", - "zah.net", - "zah.net:8443", - }, - Routes: []*gloov1.Route{ - { - Matchers: []*matchers.Matcher{{ - PathSpecifier: &matchers.Matcher_Regex{ - Regex: "/", - }, - }}, - Action: &gloov1.Route_RouteAction{ - RouteAction: &gloov1.RouteAction{ - Destination: &gloov1.RouteAction_Multi{ - Multi: &gloov1.MultiDestination{ - Destinations: []*gloov1.WeightedDestination{ - { - Destination: &gloov1.Destination{ - DestinationType: &gloov1.Destination_Kube{ - Kube: &gloov1.KubernetesServiceDestination{ - Ref: &core.ResourceRef{ - Name: "peteszah-service", - Namespace: "peteszah-service-namespace", - }, - Port: 8080, - }, - }, - }, - Weight: &wrappers.UInt32Value{Value: 0x00000064}, - }, - }, - }, - }, - }, - }, - Options: &gloov1.RouteOptions{ - HeaderManipulation: &headers.HeaderManipulation{ - RequestHeadersToAdd: []*envoycore_sk.HeaderValueOption{{HeaderOption: &envoycore_sk.HeaderValueOption_Header{Header: &envoycore_sk.HeaderValue{Key: "add", Value: "me"}}}}, - }, - }, - }, - }, - }, - }, - }, - }, - SslConfigurations: []*ssl.SslConfig{ - { - SslSecrets: &ssl.SslConfig_SecretRef{ - SecretRef: &core.ResourceRef{ - Name: 
"areallygreatsecret", - Namespace: "example", - }, - }, - SniDomains: []string{ - "petes.com", - }, - }, - }, - }, - }, - Metadata: &core.Metadata{ - Name: "test", - Namespace: "example", - }, - } - Expect(proxy).To(MatchProto(expected)) - }) - - It("renders proxies on ssl config based on annotations", func() { - namespace := "example" - serviceName := "peteszah-service" - serviceNamespace := "peteszah-service-namespace" - servicePort := int32(8080) - secretName := "areallygreatsecret" - secretNamespace := "areallygreatnamespace" - annotations := map[string]string{ - sslAnnotationKeySniDomains: "domain.com,domain.io", - sslAnnotationKeySecretName: secretName, - sslAnnotationKeySecretNamespace: secretNamespace, - } - ingress := &v1alpha12.Ingress{Ingress: knative.Ingress{ - - ObjectMeta: metav1.ObjectMeta{ - Name: "with-ssl-annotations", - Namespace: namespace, - Annotations: annotations, - }, - Spec: v1alpha1.IngressSpec{ - Rules: []v1alpha1.IngressRule{ - { - Hosts: []string{"domain.com"}, - HTTP: &v1alpha1.HTTPIngressRuleValue{ - Paths: []v1alpha1.HTTPIngressPath{ - { - Path: "/", - Splits: []v1alpha1.IngressBackendSplit{ - { - IngressBackend: v1alpha1.IngressBackend{ - ServiceName: serviceName, - ServiceNamespace: serviceNamespace, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: servicePort, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }} - proxy, errs := translateProxy(context.TODO(), "test", namespace, v1alpha12.IngressList{ingress}) - Expect(errs).NotTo(HaveOccurred()) - Expect(proxy.Listeners).To(HaveLen(1)) - Expect(proxy.Listeners[0].Name).To(Equal("https")) - Expect(proxy.Listeners[0].BindPort).To(Equal(uint32(8443))) - Expect(proxy.Listeners[0].SslConfigurations).To(HaveLen(1)) - Expect(proxy.Listeners[0].SslConfigurations[0].SslSecrets).To(Equal(&ssl.SslConfig_SecretRef{SecretRef: &core.ResourceRef{Name: secretName, Namespace: secretNamespace}})) - Expect(proxy.Listeners[0].SslConfigurations[0].SniDomains).To(Equal([]string{"domain.com", "domain.io"})) - }) -}) - -func durptr(d int) *duration.Duration { - dur := time.Duration(d) - return ptypes.DurationProto(dur) -} diff --git a/projects/knative/pkg/translator/translator_suite_test.go b/projects/knative/pkg/translator/translator_suite_test.go deleted file mode 100644 index 677a31d29cf..00000000000 --- a/projects/knative/pkg/translator/translator_suite_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package translator_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -func TestTranslator(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Translator Suite") -} diff --git a/projects/knative/pkg/translator/translator_syncer.go b/projects/knative/pkg/translator/translator_syncer.go deleted file mode 100644 index 486d2d801f3..00000000000 --- a/projects/knative/pkg/translator/translator_syncer.go +++ /dev/null @@ -1,220 +0,0 @@ -package translator - -import ( - "context" - "time" - - "github.com/solo-io/gloo/pkg/utils/syncutil" - "github.com/solo-io/go-utils/hashutils" - "go.uber.org/zap/zapcore" - - "golang.org/x/sync/errgroup" - - "github.com/rotisserie/eris" - "github.com/solo-io/gloo/projects/gateway/pkg/utils" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - glooutils "github.com/solo-io/gloo/projects/gloo/pkg/utils" - v1alpha1 "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - v1 "github.com/solo-io/gloo/projects/knative/pkg/api/v1" - "github.com/solo-io/go-utils/contextutils" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" - knativeclient "knative.dev/networking/pkg/client/clientset/versioned/typed/networking/v1alpha1" -) - -type translatorSyncer struct { - externalProxyAddress string - internalProxyAddress string - writeNamespace string - writeErrs chan error - proxyClient gloov1.ProxyClient - proxyReconciler gloov1.ProxyReconciler - ingressClient knativeclient.IngressesGetter - requireIngressClass bool - - statusClient resources.StatusClient - - // injection for testing - translateProxy func(ctx context.Context, proxyName, proxyNamespace string, ingresses v1alpha1.IngressList) (*gloov1.Proxy, error) -} - -func NewSyncer(externalProxyAddress, internalProxyAddress, writeNamespace string, proxyClient gloov1.ProxyClient, ingressClient knativeclient.IngressesGetter, writeErrs chan error, requireIngressClass bool, statusClient resources.StatusClient) v1.TranslatorSyncer { - return &translatorSyncer{ - externalProxyAddress: externalProxyAddress, - internalProxyAddress: internalProxyAddress, - writeNamespace: writeNamespace, - writeErrs: writeErrs, - proxyClient: proxyClient, - ingressClient: ingressClient, - proxyReconciler: gloov1.NewProxyReconciler(proxyClient, statusClient), - requireIngressClass: requireIngressClass, - statusClient: statusClient, - translateProxy: translateProxy, - } -} - -const ( - externalProxyName = "knative-external-proxy" - internalProxyName = "knative-internal-proxy" -) - -// enforce ingress class if requirement is set -func (s *translatorSyncer) shouldProcess(ingress *v1alpha1.Ingress) bool { - if !s.requireIngressClass { - return true - } - if len(ingress.Annotations) == 0 { - return false - } - return ingress.Annotations[ingressClassAnnotation] == glooIngressClass -} - -func (s *translatorSyncer) Sync(ctx context.Context, snap *v1.TranslatorSnapshot) error { - ctx = contextutils.WithLogger(ctx, "translatorSyncer") - - snapHash := hashutils.MustHash(snap) - logger := contextutils.LoggerFrom(ctx) - logger.Infof("begin sync %v (%v knative ingresses)", snapHash, - len(snap.Ingresses), - ) - defer logger.Infof("end sync %v", snapHash) - - // stringifying the snapshot may be an expensive operation, so we'd like to avoid building the large - // string if we're not even going to log it 
anyway - if contextutils.GetLogLevel() == zapcore.DebugLevel { - logger.Debug(syncutil.StringifySnapshot(snap)) - } - - // split ingresses by their visibility, create a proxy for each - var externalIngresses, internalIngresses v1alpha1.IngressList - - for _, ing := range snap.Ingresses { - if !s.shouldProcess(ing) { - continue - } - - if ing.IsPublic() { - externalIngresses = append(externalIngresses, ing) - } - internalIngresses = append(internalIngresses, ing) - } - - externalProxy, err := s.translateProxy(ctx, externalProxyName, s.writeNamespace, externalIngresses) - if err != nil { - logger.Warnf("snapshot %v was rejected due to invalid config: %v\n"+ - "knative ingress externalProxy will not be updated.", snapHash, err) - return err - } - - internalProxy, err := s.translateProxy(ctx, internalProxyName, s.writeNamespace, internalIngresses) - if err != nil { - logger.Warnf("snapshot %v was rejected due to invalid config: %v\n"+ - "knative ingress externalProxy will not be updated.", snapHash, err) - return err - } - - labels := map[string]string{ - glooutils.ProxyTypeKey: glooutils.KnativeProxyValue, - } - - var desiredResources gloov1.ProxyList - if externalProxy != nil { - logger.Infof("creating external proxy %v", externalProxy.GetMetadata().Ref()) - externalProxy.GetMetadata().Labels = labels - desiredResources = append(desiredResources, externalProxy) - } - - if internalProxy != nil { - logger.Infof("creating internal proxy %v", internalProxy.GetMetadata().Ref()) - internalProxy.GetMetadata().Labels = labels - desiredResources = append(desiredResources, internalProxy) - } - - proxyTransitionFunction := utils.TransitionFunction(s.statusClient) - - if err := s.proxyReconciler.Reconcile(s.writeNamespace, desiredResources, proxyTransitionFunction, clients.ListOpts{ - Ctx: ctx, - Selector: labels, - }); err != nil { - return err - } - - g := &errgroup.Group{} - g.Go(func() error { - if err := s.propagateProxyStatus(ctx, externalProxy, externalIngresses); err != nil { - return eris.Wrapf(err, "failed to propagate external proxy status "+ - "to ingress objects") - } - return nil - }) - g.Go(func() error { - if err := s.propagateProxyStatus(ctx, internalProxy, internalIngresses); err != nil { - return eris.Wrapf(err, "failed to propagate internal proxy status "+ - "to ingress objects") - } - return nil - }) - - return nil -} - -// propagate to all ingresses the status of the proxy -func (s *translatorSyncer) propagateProxyStatus(ctx context.Context, proxy *gloov1.Proxy, ingresses v1alpha1.IngressList) error { - if proxy == nil { - return nil - } - ticker := time.Tick(time.Second / 2) - for { - select { - case <-ctx.Done(): - return nil - case <-ticker: - // poll the proxy for an accepted or rejected status - updatedProxy, err := s.proxyClient.Read(proxy.GetMetadata().GetNamespace(), proxy.GetMetadata().GetName(), clients.ReadOpts{Ctx: ctx}) - if err != nil { - return err - } - - updatedProxyStatus := s.statusClient.GetStatus(updatedProxy) - switch updatedProxyStatus.GetState() { - case core.Status_Pending: - continue - case core.Status_Rejected: - contextutils.LoggerFrom(ctx).Errorf("proxy was rejected by gloo: %v", updatedProxyStatus.GetReason()) - continue - case core.Status_Accepted: - return s.markIngressesReady(ctx, ingresses) - } - } - } -} - -func (s *translatorSyncer) markIngressesReady(ctx context.Context, ingresses v1alpha1.IngressList) error { - var updatedIngresses []*knativev1alpha1.Ingress - for _, wrappedCi := range ingresses { - ci := 
knativev1alpha1.Ingress(wrappedCi.Ingress) - if ci.Status.ObservedGeneration == ci.ObjectMeta.Generation { - continue - } - ci.Status.InitializeConditions() - ci.Status.MarkNetworkConfigured() - externalLbStatus := []knativev1alpha1.LoadBalancerIngressStatus{ - {DomainInternal: s.externalProxyAddress}, - } - internalLbStatus := []knativev1alpha1.LoadBalancerIngressStatus{ - {DomainInternal: s.internalProxyAddress}, - } - ci.Status.MarkLoadBalancerReady(externalLbStatus, internalLbStatus) - ci.Status.ObservedGeneration = ci.Generation - updatedIngresses = append(updatedIngresses, &ci) - } - for _, ingress := range updatedIngresses { - if _, err := s.ingressClient.Ingresses(ingress.Namespace).UpdateStatus(ctx, ingress, metav1.UpdateOptions{}); err != nil { - contextutils.LoggerFrom(ctx).Errorf("failed to update Ingress %v status with error %v", ingress.Name, err) - } - } - return nil -} diff --git a/projects/knative/pkg/translator/translator_syncer_test.go b/projects/knative/pkg/translator/translator_syncer_test.go deleted file mode 100644 index d4dd82f7d1a..00000000000 --- a/projects/knative/pkg/translator/translator_syncer_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package translator - -import ( - "context" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/watch" - knativev1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" - v1alpha13 "knative.dev/networking/pkg/client/clientset/versioned/typed/networking/v1alpha1" - - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory" - "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory" - "github.com/solo-io/solo-kit/pkg/api/v1/resources" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" - - gloostatusutils "github.com/solo-io/gloo/pkg/utils/statusutils" - v1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/gloo/projects/knative/api/external/knative" - v1alpha1 "github.com/solo-io/gloo/projects/knative/pkg/api/external/knative" - knativev1 "github.com/solo-io/gloo/projects/knative/pkg/api/v1" -) - -var _ = Describe("TranslatorSyncer", func() { - var ( - proxyAddressExternal = "proxy-external-address" - proxyAddressInternal = "proxy-internal-address" - namespace = "write-namespace" - proxyClient v1.ProxyClient - knativeClient v1alpha13.IngressesGetter - ingress *v1alpha1.Ingress - proxy *v1.Proxy - ctx context.Context - cancel context.CancelFunc - statusClient resources.StatusClient - ) - BeforeEach(func() { - ctx, cancel = context.WithCancel(context.Background()) - proxyClient, _ = v1.NewProxyClient(ctx, &factory.MemoryResourceClientFactory{Cache: memory.NewInMemoryResourceCache()}) - ingress = &v1alpha1.Ingress{Ingress: knative.Ingress{ObjectMeta: metav1.ObjectMeta{Generation: 1}, - Spec: knativev1alpha1.IngressSpec{ - Rules: []knativev1alpha1.IngressRule{{ - Hosts: []string{"*"}, - HTTP: &knativev1alpha1.HTTPIngressRuleValue{ - Paths: []knativev1alpha1.HTTPIngressPath{ - { - Path: "/hay", - Splits: []knativev1alpha1.IngressBackendSplit{ - { - IngressBackend: knativev1alpha1.IngressBackend{ - ServiceName: "a", - ServiceNamespace: "b", - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: 1234, - }, - }, - }, - }, - }, - }}, - }, - }}, - }} - knativeClient = &mockCiClient{ci: toKube(ingress)} - proxy = &v1.Proxy{Metadata: &core.Metadata{Name: "hi", Namespace: "howareyou"}} - 
proxy, _ = proxyClient.Write(proxy, clients.WriteOpts{}) - statusClient = gloostatusutils.GetStatusClientForNamespace("ns") - }) - - AfterEach(func() { - cancel() - }) - - It("only processes annotated proxies when requireIngressClass is set to true successful proxy status to the ingresses it was created from", func() { - syncer := NewSyncer(proxyAddressExternal, proxyAddressInternal, namespace, proxyClient, knativeClient, make(chan error), true, statusClient).(*translatorSyncer) - - // expect ingress without class to be ignored - err := syncer.Sync(context.TODO(), &knativev1.TranslatorSnapshot{ - Ingresses: []*v1alpha1.Ingress{ingress}, - }) - Expect(err).NotTo(HaveOccurred()) - - // expect the ingress to be ignored - // we should have no listeners - proxies, err := proxyClient.List(namespace, clients.ListOpts{}) - Expect(err).NotTo(HaveOccurred()) - Expect(proxies).To(HaveLen(2)) - Expect(proxies[0].Listeners).To(BeEmpty()) - - ingress.Annotations = map[string]string{ - ingressClassAnnotation: glooIngressClass, - } - - err = syncer.Sync(context.TODO(), &knativev1.TranslatorSnapshot{ - Ingresses: []*v1alpha1.Ingress{ingress}, - }) - Expect(err).NotTo(HaveOccurred()) - - // expect a proxy to be created - proxies, err = proxyClient.List(namespace, clients.ListOpts{}) - Expect(err).NotTo(HaveOccurred()) - Expect(proxies).To(HaveLen(2)) - Expect(proxies[0].Listeners).To(HaveLen(1)) - Expect(proxies[0].Listeners[0].GetHttpListener()).NotTo(BeNil()) - Expect(proxies[0].Listeners[0].GetHttpListener().VirtualHosts).To(HaveLen(1)) - }) - - It("propagates successful proxy status to the ingresses it was created from", func() { - // requireIngressClass = true - syncer := NewSyncer(proxyAddressExternal, proxyAddressInternal, namespace, proxyClient, knativeClient, make(chan error), false, statusClient).(*translatorSyncer) - - go func() { - defer GinkgoRecover() - // update status after a 1s sleep - time.Sleep(time.Second / 5) - statusClient.SetStatus(proxy, &core.Status{ - State: core.Status_Accepted, - }) - - _, err := proxyClient.Write(proxy, clients.WriteOpts{OverwriteExisting: true}) - Expect(err).NotTo(HaveOccurred()) - }() - - err := syncer.propagateProxyStatus(context.TODO(), proxy, v1alpha1.IngressList{ingress}) - Expect(err).NotTo(HaveOccurred()) - - // _ formally used as 'ci' - ci, err := knativeClient.Ingresses(ingress.Namespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - Expect(ci.IsReady()).To(BeTrue()) - }) - - It("puts all ingresses on the internal proxy", func() { - syncer := NewSyncer(proxyAddressExternal, proxyAddressInternal, namespace, proxyClient, knativeClient, make(chan error), false, statusClient).(*translatorSyncer) - - // the ingresses loaded to each proxy - proxiesWithIngresses := make(map[string]v1alpha1.IngressList) - syncer.translateProxy = func(ctx context.Context, proxyName, proxyNamespace string, ingresses v1alpha1.IngressList) (proxy *v1.Proxy, err error) { - proxiesWithIngresses[proxyName] = ingresses - return nil, nil - } - - externalIngress := &v1alpha1.Ingress{ - Ingress: knative.Ingress{ - ObjectMeta: metav1.ObjectMeta{Generation: 1}, - Spec: knativev1alpha1.IngressSpec{}, - }, - } - internalIngress := &v1alpha1.Ingress{ - Ingress: knative.Ingress{ - ObjectMeta: metav1.ObjectMeta{Generation: 1}, - Spec: knativev1alpha1.IngressSpec{ - Rules: []knativev1alpha1.IngressRule{ - {Visibility: knativev1alpha1.IngressVisibilityClusterLocal}, - }, - }, - }, - } - - // sync with an ClusterLocal and an External visibility service - err 
:= syncer.Sync(context.TODO(), &knativev1.TranslatorSnapshot{ - Ingresses: []*v1alpha1.Ingress{ - externalIngress, - internalIngress, - }, - }) - Expect(err).NotTo(HaveOccurred()) - - // the External should be on both ingresses - Expect(proxiesWithIngresses[externalProxyName]).To(HaveLen(1)) - Expect(proxiesWithIngresses[internalProxyName]).To(HaveLen(2)) - }) -}) - -func toKube(ci *v1alpha1.Ingress) *knativev1alpha1.Ingress { - kubeCi := knativev1alpha1.Ingress(ci.Ingress) - return &kubeCi -} - -type mockCiClient struct{ ci *knativev1alpha1.Ingress } - -func (c *mockCiClient) Ingresses(namespace string) v1alpha13.IngressInterface { - return c -} - -func (c *mockCiClient) UpdateStatus(ctx context.Context, ci *knativev1alpha1.Ingress, opts metav1.UpdateOptions) (*knativev1alpha1.Ingress, error) { - c.ci.Status = ci.Status - return ci, nil -} - -func (*mockCiClient) Create(ctx context.Context, ci *knativev1alpha1.Ingress, opts metav1.CreateOptions) (*knativev1alpha1.Ingress, error) { - panic("implement me") -} - -func (*mockCiClient) Update(ctx context.Context, ci *knativev1alpha1.Ingress, opts metav1.UpdateOptions) (*knativev1alpha1.Ingress, error) { - panic("implement me") -} - -func (*mockCiClient) Delete(ctx context.Context, name string, options metav1.DeleteOptions) error { - panic("implement me") -} - -func (*mockCiClient) DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error { - panic("implement me") -} - -func (c *mockCiClient) Get(ctx context.Context, name string, options metav1.GetOptions) (*knativev1alpha1.Ingress, error) { - return c.ci, nil -} - -func (*mockCiClient) List(ctx context.Context, opts metav1.ListOptions) (*knativev1alpha1.IngressList, error) { - panic("implement me") -} - -func (*mockCiClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - panic("implement me") -} - -func (*mockCiClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *knativev1alpha1.Ingress, err error) { - panic("implement me") -} diff --git a/test/e2e/access_log_test.go b/test/e2e/access_log_test.go deleted file mode 100644 index b2c163b73ce..00000000000 --- a/test/e2e/access_log_test.go +++ /dev/null @@ -1,364 +0,0 @@ -package e2e_test - -import ( - "context" - "net/http" - "time" - - "github.com/solo-io/gloo/test/testutils" - - "github.com/solo-io/gloo/test/gomega/matchers" - - envoy_data_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" - v1 "github.com/solo-io/gloo/projects/gateway/pkg/api/v1" - "github.com/solo-io/gloo/test/e2e" - - envoyals "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" - structpb "github.com/golang/protobuf/ptypes/struct" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/solo-io/gloo/projects/accesslogger/pkg/loggingservice" - "github.com/solo-io/gloo/projects/accesslogger/pkg/runner" - gwdefaults "github.com/solo-io/gloo/projects/gateway/pkg/defaults" - - gloo_envoy_v3 "github.com/solo-io/gloo/projects/gloo/pkg/api/external/envoy/config/core/v3" - - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/als" - alsplugin "github.com/solo-io/gloo/projects/gloo/pkg/plugins/als" - "github.com/solo-io/gloo/projects/gloo/pkg/translator" -) - -var _ = Describe("Access Log", func() { - - var ( - testContext *e2e.TestContext - ) - - BeforeEach(func() { - testContext = testContextFactory.NewTestContext() - testContext.BeforeEach() - // This mutation must happen after the testContext.BeforeEach() becuase that - // is where our VirtualService is constructed. - vs := testContext.ResourcesToCreate().VirtualServices[0] - routeOptions := &gloov1.RouteOptions{ - EnvoyMetadata: map[string]*structpb.Struct{ - "foo-namespace": { - Fields: map[string]*structpb.Value{ - "bar-metadata": { - Kind: &structpb.Value_StringValue{ - StringValue: "greetings", - }, - }}, - }, - }} - vs.GetVirtualHost().GetRoutes()[0].Options = routeOptions - }) - - AfterEach(func() { - testContext.AfterEach() - }) - - JustBeforeEach(func() { - testContext.JustBeforeEach() - }) - - JustAfterEach(func() { - testContext.JustAfterEach() - }) - - Context("Grpc", func() { - - var ( - msgChan <-chan *envoy_data_accesslog_v3.HTTPAccessLogEntry - ) - - BeforeEach(func() { - msgChan = runAccessLog(testContext.Ctx(), testContext.EnvoyInstance().AccessLogPort) - - gw := gwdefaults.DefaultGateway(writeNamespace) - gw.Options = &gloov1.ListenerOptions{ - AccessLoggingService: &als.AccessLoggingService{ - AccessLog: []*als.AccessLog{ - { - OutputDestination: &als.AccessLog_GrpcService{ - GrpcService: &als.GrpcService{ - LogName: "test-log", - ServiceRef: &als.GrpcService_StaticClusterName{ - StaticClusterName: alsplugin.ClusterName, - }, - }, - }, - }, - }, - }, - } - - testContext.ResourcesToCreate().Gateways = v1.GatewayList{ - gw, - } - }) - - It("can stream access logs", func() { - requestBuilder := testContext.GetHttpRequestBuilder() - Eventually(func(g Gomega) { - g.Expect(testutils.DefaultHttpClient.Do(requestBuilder.Build())).Should(matchers.HaveOkResponse()) - - var entry *envoy_data_accesslog_v3.HTTPAccessLogEntry - g.Eventually(msgChan, 2*time.Second).Should(Receive(&entry)) - g.Expect(entry.CommonProperties.UpstreamCluster).To(Equal(translator.UpstreamToClusterName(testContext.TestUpstream().Upstream.Metadata.Ref()))) - }, time.Second*21, time.Second*2).Should(Succeed()) - }) - - }) - - Context("File", func() { - var gw *v1.Gateway - Context("String Format", func() { - BeforeEach(func() { - gw = gwdefaults.DefaultGateway(writeNamespace) - gw.Options = &gloov1.ListenerOptions{ - AccessLoggingService: &als.AccessLoggingService{ - AccessLog: []*als.AccessLog{ - { - OutputDestination: &als.AccessLog_FileSink{ - FileSink: &als.FileSink{ - Path: "/dev/stdout", - OutputFormat: &als.FileSink_StringFormat{ - StringFormat: "", - }, - }, - }, - }, - }, - }, - } - - testContext.ResourcesToCreate().Gateways = v1.GatewayList{ - gw, - } - }) - It("can create string access logs", func() { - requestBuilder := testContext.GetHttpRequestBuilder(). - WithPath("1"). - WithQuery("foo=bar"). 
- WithPostMethod() - Eventually(func(g Gomega) { - g.Expect(testutils.DefaultHttpClient.Do(requestBuilder.Build())).Should(matchers.HaveOkResponse()) - - logs, err := testContext.EnvoyInstance().Logs() - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(logs).To(ContainSubstring(`"POST /1?foo=bar HTTP/1.1" 200`)) - }, time.Second*30, time.Second/2).Should(Succeed()) - }) - Context("Formatter extensions", func() { - BeforeEach(func() { - gw.GetOptions().GetAccessLoggingService().GetAccessLog()[0].GetFileSink().OutputFormat = &als.FileSink_StringFormat{ - StringFormat: "req: %REQ(:PATH)%\n" + - "req_without_query: %REQ_WITHOUT_QUERY(:PATH)%\n" + - "metadata: %METADATA(ROUTE:foo-namespace)%\n", - } - }) - It("can create formatted string access logs", func() { - requestBuilder := testContext.GetHttpRequestBuilder(). - WithPath("1"). - WithQuery("sensitive=data&needs=removed"). - WithPostMethod() - Eventually(func(g Gomega) { - g.Expect(testutils.DefaultHttpClient.Do(requestBuilder.Build())).Should(matchers.HaveOkResponse()) - - logs, err := testContext.EnvoyInstance().Logs() - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(logs).To(ContainSubstring(`req: /1?sensitive=data&needs=removed`)) - g.Expect(logs).To(ContainSubstring(`req_without_query: /1`)) - g.Expect(logs).To(ContainSubstring(`metadata: {"bar-metadata":"greetings"}`)) - }, time.Second*30, time.Second/2).Should(Succeed()) - }) - - }) - }) - - Context("Json Format", func() { - - BeforeEach(func() { - gw := gwdefaults.DefaultGateway(writeNamespace) - gw.Options = &gloov1.ListenerOptions{ - AccessLoggingService: &als.AccessLoggingService{ - AccessLog: []*als.AccessLog{ - { - OutputDestination: &als.AccessLog_FileSink{ - FileSink: &als.FileSink{ - Path: "/dev/stdout", - OutputFormat: &als.FileSink_JsonFormat{ - JsonFormat: &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "protocol": { - Kind: &structpb.Value_StringValue{ - StringValue: "%PROTOCOL%", - }, - }, - "method": { - Kind: &structpb.Value_StringValue{ - StringValue: "%REQ(:METHOD)%", - }, - }, - "path": { - Kind: &structpb.Value_StringValue{ - StringValue: "%REQ(:PATH)%", - }, - }, - "path_without_query": { - Kind: &structpb.Value_StringValue{ - StringValue: "%REQ_WITHOUT_QUERY(:PATH)%", - }, - }, - "route_md": { - Kind: &structpb.Value_StringValue{ - StringValue: "%METADATA(ROUTE:foo-namespace)%", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - testContext.ResourcesToCreate().Gateways = v1.GatewayList{ - gw, - } - }) - It("can create json access logs", func() { - requestBuilder := testContext.GetHttpRequestBuilder(). - WithPath("1?foo=bar"). 
- WithPostMethod() - Eventually(func(g Gomega) { - g.Expect(testutils.DefaultHttpClient.Do(requestBuilder.Build())).Should(matchers.HaveOkResponse()) - - logs, err := testContext.EnvoyInstance().Logs() - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(logs).To(ContainSubstring(`"method":"POST"`)) - g.Expect(logs).To(ContainSubstring(`"path":"/1?foo=bar"`)) - g.Expect(logs).To(ContainSubstring(`"path_without_query":"/1"`)) - g.Expect(logs).To(ContainSubstring(`"protocol":"HTTP/1.1"`)) - g.Expect(logs).To(ContainSubstring(`"route_md":{"bar-metadata":"greetings"}`)) - }, time.Second*30, time.Second/2).Should(Succeed()) - }) - }) - }) - - Context("Test Filters", func() { - // The output format doesn't (or at least shouldn't) matter for the filter tests, except in how we examine the access logs - // We'll use the string output because it's easiest to match against - BeforeEach(func() { - gw := gwdefaults.DefaultGateway(writeNamespace) - filter := &als.AccessLogFilter{ - FilterSpecifier: &als.AccessLogFilter_StatusCodeFilter{ - StatusCodeFilter: &als.StatusCodeFilter{ - Comparison: &als.ComparisonFilter{ - Op: als.ComparisonFilter_EQ, - Value: &gloo_envoy_v3.RuntimeUInt32{ - DefaultValue: 404, - RuntimeKey: "404", - }, - }, - }, - }, - } - - gw.Options = &gloov1.ListenerOptions{ - AccessLoggingService: &als.AccessLoggingService{ - AccessLog: []*als.AccessLog{ - { - OutputDestination: &als.AccessLog_FileSink{ - FileSink: &als.FileSink{ - Path: "/dev/stdout", - OutputFormat: &als.FileSink_StringFormat{ - StringFormat: "", - }, - }, - }, - Filter: filter, - }, - }, - }, - } - testContext.ResourcesToCreate().Gateways = v1.GatewayList{ - gw, - } - }) - - It("Can filter by status code", func() { - requestBuilder := testContext.GetHttpRequestBuilder(). - WithPath("1"). - WithPostMethod() - Eventually(func(g Gomega) { - g.Expect(testutils.DefaultHttpClient.Do(requestBuilder.Build())).Should(matchers.HaveOkResponse()) - - logs, err := testContext.EnvoyInstance().Logs() - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(logs).To(Not(ContainSubstring(`"POST /1 HTTP/1.1" 200`))) - }, time.Second*30, time.Second/2).Should(Succeed()) - - badHostRequestBuilder := testContext.GetHttpRequestBuilder(). - WithPath("BAD/HOST"). - WithPostMethod(). - WithHost("") // We can get a 404 by not setting the Host header. 
- Eventually(func(g Gomega) { - g.Expect(testutils.DefaultHttpClient.Do(badHostRequestBuilder.Build())).Should(matchers.HaveStatusCode(http.StatusNotFound)) - - logs, err := testContext.EnvoyInstance().Logs() - g.Expect(err).To(Not(HaveOccurred())) - g.Expect(logs).To(Not(ContainSubstring(`"POST /1 HTTP/1.1" 200`))) - g.Expect(logs).To(ContainSubstring(`"POST /BAD/HOST HTTP/1.1" 404`)) - - }, time.Second*30, time.Second/2).Should(Succeed()) - }) - }) - -}) - -func runAccessLog(ctx context.Context, accessLogPort uint32) <-chan *envoy_data_accesslog_v3.HTTPAccessLogEntry { - msgChan := make(chan *envoy_data_accesslog_v3.HTTPAccessLogEntry, 10) - - opts := loggingservice.Options{ - Ordered: true, - Callbacks: loggingservice.AlsCallbackList{ - func(ctx context.Context, message *envoyals.StreamAccessLogsMessage) error { - defer GinkgoRecover() - httpLogs := message.GetHttpLogs() - Expect(httpLogs).NotTo(BeNil()) - for _, v := range httpLogs.LogEntry { - select { - case msgChan <- v: - return nil - case <-time.After(time.Second): - Fail("unable to send log message on channel") - } - } - return nil - }, - }, - Ctx: ctx, - } - - service := loggingservice.NewServer(opts) - - settings := runner.Settings{ - DebugPort: 0, - ServerPort: int(accessLogPort), - ServiceName: "AccessLog", - } - - go func(testctx context.Context) { - defer GinkgoRecover() - err := runner.RunWithSettings(testctx, service, settings) - if testctx.Err() == nil { - Expect(err).NotTo(HaveOccurred()) - } - }(ctx) - return msgChan -} diff --git a/test/e2e/grpcweb_test.go b/test/e2e/grpcweb_test.go deleted file mode 100644 index b5e2c123b24..00000000000 --- a/test/e2e/grpcweb_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package e2e_test - -import ( - "bytes" - "encoding/base64" - "fmt" - "net/http" - "time" - - "github.com/solo-io/gloo/test/gomega/matchers" - - proto_matchers "github.com/solo-io/solo-kit/test/matchers" - - gatewaydefaults "github.com/solo-io/gloo/projects/gateway/pkg/defaults" - "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/grpc_web" - - envoy_data_accesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" - envoyals "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/wrappers" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - v1 "github.com/solo-io/gloo/projects/gateway/pkg/api/v1" - gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" - static_plugin_gloo "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/static" - "github.com/solo-io/gloo/test/e2e" - "github.com/solo-io/gloo/test/helpers" - "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" -) - -var _ = Describe("Grpc Web", func() { - - var ( - testContext *e2e.TestContext - ) - - BeforeEach(func() { - testContext = testContextFactory.NewTestContext() - testContext.BeforeEach() - }) - - AfterEach(func() { - testContext.AfterEach() - }) - - JustBeforeEach(func() { - testContext.JustBeforeEach() - }) - - JustAfterEach(func() { - testContext.JustAfterEach() - }) - - Context("Disable", func() { - - BeforeEach(func() { - gw := gatewaydefaults.DefaultGateway(writeNamespace) - gw.GetHttpGateway().Options = &gloov1.HttpListenerOptions{ - GrpcWeb: &grpc_web.GrpcWeb{ - Disable: true, - }, - } - - testContext.ResourcesToCreate().Gateways = v1.GatewayList{ - gw, - } - }) - - It("can disable grpc web filter", func() { - Eventually(func(g Gomega) { - proxy, err := testContext.ReadDefaultProxy() - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(proxy.GetListeners()).To(HaveLen(1)) - g.Expect(proxy.GetListeners()[0].GetHttpListener().GetOptions().GetGrpcWeb()).To(proto_matchers.MatchProto(&grpc_web.GrpcWeb{ - Disable: true, - })) - }, "5s", ".5s").Should(Succeed()) - }) - }) - - Context("Grpc", func() { - - var ( - msgChan <-chan *envoy_data_accesslog_v3.HTTPAccessLogEntry - ) - - BeforeEach(func() { - grpcUpstream := &gloov1.Upstream{ - Metadata: &core.Metadata{ - Name: "grpc-service", - Namespace: writeNamespace, - }, - UseHttp2: &wrappers.BoolValue{Value: true}, - UpstreamType: &gloov1.Upstream_Static{ - Static: &static_plugin_gloo.UpstreamSpec{ - Hosts: []*static_plugin_gloo.Host{ - { - Addr: testContext.EnvoyInstance().LocalAddr(), - Port: testContext.EnvoyInstance().AccessLogPort, - }, - }, - }, - }, - } - vsToGrpcUpstream := helpers.NewVirtualServiceBuilder(). - WithName("vs-grpc"). - WithNamespace(writeNamespace). - WithDomain("grpc.com"). - WithRoutePrefixMatcher("grpc", "/"). - WithRouteActionToUpstream("grpc", grpcUpstream). 
- Build() - - // we want to test grpc web, so lets reuse the access log service - // we could use any other service, but we already have the ALS setup for tests - msgChan = runAccessLog(testContext.Ctx(), testContext.EnvoyInstance().AccessLogPort) - - gw := gatewaydefaults.DefaultGateway(writeNamespace) - gw.GetHttpGateway().Options = &gloov1.HttpListenerOptions{ - GrpcWeb: &grpc_web.GrpcWeb{ - Disable: false, - }, - } - - testContext.ResourcesToCreate().Gateways = v1.GatewayList{ - gw, - } - testContext.ResourcesToCreate().VirtualServices = v1.VirtualServiceList{ - vsToGrpcUpstream, - } - testContext.ResourcesToCreate().Upstreams = gloov1.UpstreamList{ - grpcUpstream, - } - }) - - It("works with grpc web", func() { - // make a grpc web request - toSend := &envoyals.StreamAccessLogsMessage{ - LogEntries: &envoyals.StreamAccessLogsMessage_HttpLogs{ - HttpLogs: &envoyals.StreamAccessLogsMessage_HTTPAccessLogEntries{ - LogEntry: []*envoy_data_accesslog_v3.HTTPAccessLogEntry{{ - CommonProperties: &envoy_data_accesslog_v3.AccessLogCommon{ - UpstreamCluster: "foo", - }, - }}, - }, - }, - } - - // send toSend using grpc web - body, err := proto.Marshal(toSend) - Expect(err).NotTo(HaveOccurred()) - - var buffer bytes.Buffer - // write the length in the buffer - // compressed flag - buffer.Write([]byte{0}) - // length - Expect(len(body)).To(BeNumerically("<=", 0xff)) - buffer.Write([]byte{0, 0, 0, byte(len(body))}) - - // write the body to the buffer - buffer.Write(body) - - dest := make([]byte, base64.StdEncoding.EncodedLen(len(buffer.Bytes()))) - base64.StdEncoding.Encode(dest, buffer.Bytes()) - var bufferbase64 bytes.Buffer - bufferbase64.Write(dest) - - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://localhost:%d/envoy.service.accesslog.v3.AccessLogService/StreamAccessLogs", testContext.EnvoyInstance().HttpPort), &bufferbase64) - Expect(err).NotTo(HaveOccurred()) - req.Host = "grpc.com" - req.Header.Set("content-type", "application/grpc-web-text") - - Eventually(func(g Gomega) { - g.Expect(http.DefaultClient.Do(req)).Should(matchers.HaveOkResponse()) - }, "10s", "0.5s").Should(Succeed()) - - var entry *envoy_data_accesslog_v3.HTTPAccessLogEntry - Eventually(msgChan, time.Second).Should(Receive(&entry)) - Expect(entry.CommonProperties.UpstreamCluster).To(Equal("foo")) - }) - }) - -}) diff --git a/test/kube2e/gloo/resource_client_test.go b/test/kube2e/gloo/resource_client_test.go deleted file mode 100644 index f31b06011ef..00000000000 --- a/test/kube2e/gloo/resource_client_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package gloo_test - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/solo-io/gloo/projects/ingress/pkg/api/service" - v1 "github.com/solo-io/gloo/projects/ingress/pkg/api/v1" - "github.com/solo-io/solo-kit/pkg/api/v1/clients" - "github.com/solo-io/solo-kit/test/helpers" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -// Kubernetes tests for resource client from projects/ingress/pkg/api/service -var _ = Describe("ResourceClient", func() { - - var ( - testNamespace string - - kubeClient kubernetes.Interface - ) - - BeforeEach(func() { - var err error - - testNamespace = helpers.RandString(8) - kubeClient = resourceClientset.KubeClients() - - _, err = kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespace, - }, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - err := kubeClient.CoreV1().Namespaces().Delete(ctx, testNamespace, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) - }) - - It("can CRUD on v1 Services", func() { - baseClient := service.NewResourceClient(kubeClient, &v1.Ingress{}) - svcClient := v1.NewKubeServiceClientWithBase(baseClient) - - kubeSvcClient := kubeClient.CoreV1().Services(testNamespace) - kubeSvc, err := kubeSvcClient.Create(ctx, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "hi", - Namespace: testNamespace, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: "http", - Protocol: corev1.ProtocolTCP, - Port: 1234, - }, - }, - Selector: map[string]string{"hi": "bye"}, - }, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - ingressResource, err := svcClient.Read(kubeSvc.Namespace, kubeSvc.Name, clients.ReadOpts{}) - Expect(err).NotTo(HaveOccurred()) - convertedIng, err := service.ToKube(ingressResource) - Expect(err).NotTo(HaveOccurred()) - Expect(convertedIng.Spec).To(Equal(kubeSvc.Spec)) - }) -}) From 4ea021f04c4aae6d7c390f664344bde62de02a70 Mon Sep 17 00:00:00 2001 From: Ian Rudie Date: Fri, 17 Jan 2025 12:18:49 -0500 Subject: [PATCH 2/4] fixup the README.md post-rename (#10468) Signed-off-by: Ian Rudie Co-authored-by: Jenny Shu <28537278+jenshu@users.noreply.github.com> --- README.md | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 785fe6b0622..9e1855651f5 100644 --- a/README.md +++ b/README.md @@ -8,24 +8,24 @@ An Envoy-Powered Kubernetes-Native API Gateway +# kgateway -# 🚧 NOTE: This project is in the process of being donated to the CNCF and is not affiliated with the Kubernetes project. It is being re-factored as part of the process.🚧 -Please see [the plan](https://github.com/k8sgateway/k8sgateway/issues/10363) for more information and current status. +Please see [the plan](https://github.com/kgateway-dev/kgateway/issues/10363) for more information and current status. -## About K8sGateway -K8sGateway is a feature-rich, fast, and flexible Kubernetes-native ingress controller and next-generation API gateway that is built on top of [Envoy proxy](https://www.envoyproxy.io) and the Kubernetes Gateway API. It excels in function-level routing, supports legacy apps, microservices and serverless, offers robust discovery capabilities, integrates seamlessly with open-source projects, and is designed to support hybrid applications with various technologies, architectures, protocols, and clouds. 
+## About kgateway +Kgateway is a feature-rich, fast, and flexible Kubernetes-native ingress controller and next-generation API gateway that is built on top of [Envoy proxy](https://www.envoyproxy.io) and the Kubernetes Gateway API. It excels in function-level routing, supports legacy apps, microservices and serverless, offers robust discovery capabilities, integrates seamlessly with open-source projects, and is designed to support hybrid applications with various technologies, architectures, protocols, and clouds. [**Installation**](https://k8sgateway.io/docs/quickstart/)   |   [**Documentation**](https://k8sgateway.io/docs)   |   [**Blog**](https://k8sgateway.io/docs/)   |   [**Slack**](https://cloud-native.slack.com/archives/C080D3PJMS4)   | -
[architecture diagram image: "K8sGateway Architecture"]
+[architecture diagram image: "kgateway Architecture"]
-### Using K8sGateway -- **Kubernetes Gateway API**: K8sGateway is a feature-rich ingress controller, built on top of the Envoy Proxy and fully conformant with the Kubernetes Gateway API. -- **Next-generation API gateway**: K8sGateway provides a long list of API gateway features including rate limiting, circuit breaking, retries, caching, transformation, service-mesh integration, security, external authentication and authorization. -- **Hybrid apps**: K8sGateway creates applications that route to backends implemented as microservices, serverless functions and legacy apps. This feature can help users to +### Using kgateway +- **Kubernetes Gateway API**: Kgateway is a feature-rich ingress controller, built on top of the Envoy Proxy and fully conformant with the Kubernetes Gateway API. +- **Next-generation API gateway**: Kgateway provides a long list of API gateway features including rate limiting, circuit breaking, retries, caching, transformation, service-mesh integration, security, external authentication and authorization. +- **Hybrid apps**: Kgateway creates applications that route to backends implemented as microservices, serverless functions and legacy apps. This feature can help users to * Gradually migrate from their legacy code to microservices and serverless. * Add new functionalities using cloud-native technologies while maintaining their legacy codebase. * Allow different teams in an organization choose different architectures. @@ -34,22 +34,22 @@ K8sGateway is a feature-rich, fast, and flexible Kubernetes-native ingress contr PLEASE DO NOT RENAME THIS SECTION This header is used as an anchor in our CNCF Donation Issue --> -### What makes K8sGateway unique -- **Function-level routing allows integration of legacy applications, microservices and serverless**: K8sGateway can route requests directly to functions. Request to Function can be a serverless function call (e.g. Lambda, Google Cloud Function, OpenFaaS Function, etc.), an API call on a microservice or a legacy service (e.g. a REST API call, OpenAPI operation, XML/SOAP request etc.), or publishing to a message queue (e.g. NATS, AMQP, etc.). This unique ability is what makes K8sGateway the only API gateway that supports hybrid apps as well as the only one that does not tie the user to a specific paradigm. -- **K8sGateway incorporates vetted open-source projects to provide broad functionality**: K8sGateway supports high-quality features by integrating with top open-source projects, including gRPC, OpenTracing, NATS and more. K8sGateway's architecture allows rapid integration of future popular open-source projects as they emerge. -- **Full automated discovery lets users move fast**: Upon launch, K8sGateway creates a catalog of all available destinations and continuously keeps them up to date. This takes the responsibility for 'bookkeeping' away from the developers and guarantees that new features become available as soon as they are ready. K8sGateway discovers across IaaS, PaaS and FaaS providers as well as Swagger, and gRPC. +### What makes kgateway unique +- **Function-level routing allows integration of legacy applications, microservices and serverless**: Kgateway can route requests directly to functions. Request to Function can be a serverless function call (e.g. Lambda, Google Cloud Function, OpenFaaS Function, etc.), an API call on a microservice or a legacy service (e.g. a REST API call, OpenAPI operation, XML/SOAP request etc.), or publishing to a message queue (e.g. NATS, AMQP, etc.). 
This unique ability is what makes kgateway the only API gateway that supports hybrid apps as well as the only one that does not tie the user to a specific paradigm. +- **Kgateway incorporates vetted open-source projects to provide broad functionality**: Kgateway supports high-quality features by integrating with top open-source projects, including gRPC, OpenTracing, NATS and more. Kgateway's architecture allows rapid integration of future popular open-source projects as they emerge. +- **Full automated discovery lets users move fast**: Upon launch, kgateway creates a catalog of all available destinations and continuously keeps them up to date. This takes the responsibility for 'bookkeeping' away from the developers and guarantees that new features become available as soon as they are ready. Kgateway discovers across IaaS, PaaS and FaaS providers as well as Swagger, and gRPC. -## Next Steps -- Join us on our Slack channel: [#k8sgateway](https://cloud-native.slack.com/archives/C080D3PJMS4) +## Next steps +- Join us on our Slack channel: [#kgateway](https://cloud-native.slack.com/archives/C080D3PJMS4) - Check out the docs: [https://k8sgateway.io/docs](https://k8sgateway.io/docs) -- Learn more about the [community](https://github.com/k8sgateway/community) +- Learn more about the [community](https://github.com/kgateway-dev/community) -## Contributing to K8sGateway +## Contributing to kgateway The [devel](devel) folder should be the starting point for understanding the code, and contributing to the product. ## Thanks -**K8sGateway** would not be possible without the valuable open-source work of projects in the community. We would like to extend a special thank-you to [Envoy](https://www.envoyproxy.io). +**Kgateway** would not be possible without the valuable open-source work of projects in the community. We would like to extend a special thank-you to [Envoy](https://www.envoyproxy.io). ## Security -*Reporting security issues* : We take K8sGateway's security very seriously. If you've found a security issue or a potential security issue in K8sGateway, please DO NOT file a public Github issue, instead follow the directions laid out in the [k8sgateway/community respository](https://github.com/k8sgateway/community/blob/main/CVE.md). +*Reporting security issues* : We take kgateway's security very seriously. If you've found a security issue or a potential security issue in kgateway, please DO NOT file a public Github issue, instead follow the directions laid out in the [kgateway/community respository](https://github.com/kgateway-dev/community/blob/main/CVE.md). 
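Reviewer note: the renamed README above leans on kgateway's conformance with the Kubernetes Gateway API. As a point of reference, traffic is normally attached to such a gateway with standard `Gateway` and `HTTPRoute` resources; the sketch below is illustrative only — the `kgateway` GatewayClass name, the resource names, the namespace, and the backend Service are assumptions, not values taken from this patch series.

```yaml
# Minimal Gateway API sketch (illustrative; names and class are assumptions).
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: example-gateway        # hypothetical name
  namespace: default
spec:
  gatewayClassName: kgateway   # assumed GatewayClass name; use whatever class the install registers
  listeners:
  - name: http
    protocol: HTTP
    port: 80
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: example-route          # hypothetical name
  namespace: default
spec:
  parentRefs:
  - name: example-gateway      # binds the route to the Gateway above
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /
    backendRefs:
    - name: example-service    # hypothetical backend Service
      port: 8080
```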
From dba20ce459113cf9f55692cf589fbf381e25fc88 Mon Sep 17 00:00:00 2001 From: Lawrence Gadban Date: Fri, 17 Jan 2025 12:20:45 -0600 Subject: [PATCH 3/4] remove ALS helm --- .../templates/6-access-logger-deployment.yaml | 99 ------------------- .../templates/6-access-logger-service.yaml | 33 ------- 2 files changed, 132 deletions(-) delete mode 100644 install/helm/gloo/templates/6-access-logger-deployment.yaml delete mode 100644 install/helm/gloo/templates/6-access-logger-service.yaml diff --git a/install/helm/gloo/templates/6-access-logger-deployment.yaml b/install/helm/gloo/templates/6-access-logger-deployment.yaml deleted file mode 100644 index 995ca937b1c..00000000000 --- a/install/helm/gloo/templates/6-access-logger-deployment.yaml +++ /dev/null @@ -1,99 +0,0 @@ -{{- define "accessLogger.deploymentSpec"}} -{{- if and .Values.gateway.enabled .Values.accessLogger.enabled }} -{{- $image := .Values.accessLogger.image }} -{{- $statsConfig := coalesce .Values.accessLogger.stats .Values.global.glooStats -}} -{{- if .Values.global }} -{{- $image = merge .Values.accessLogger.image .Values.global.image }} -{{- end }} - -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: gateway-proxy-access-logger - name: gateway-proxy-access-logger - namespace: {{ $.Release.Namespace }} -spec: - replicas: {{ .Values.accessLogger.replicas }} - selector: - matchLabels: - app: gloo - gloo: gateway-proxy-access-logger - template: - metadata: - labels: - app: gloo - gloo: gateway-proxy-access-logger - {{- if .Values.accessLogger.extraAccessLoggerLabels }} - {{- range $key, $value := .Values.accessLogger.extraAccessLoggerLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- if .Values.global.istioIntegration.disableAutoinjection }} - sidecar.istio.io/inject: "false" - {{- end }} - annotations: - {{- if $statsConfig.enabled }} - prometheus.io/path: /metrics - prometheus.io/port: "9091" - prometheus.io/scrape: "true" - {{- end }} - {{- if .Values.accessLogger.extraAccessLoggerAnnotations }} - {{- range $key, $value := .Values.accessLogger.extraAccessLoggerAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- include "gloo.pullSecret" $image | nindent 6 -}} - serviceAccountName: gateway-proxy - {{- include "gloo.podSpecStandardFields" .Values.accessLogger | nindent 6 -}} - securityContext: - runAsNonRoot: true - {{- if not .Values.accessLogger.floatingUserId }} - runAsUser: {{ printf "%.0f" (float64 .Values.accessLogger.runAsUser) -}} - {{- end }} - containers: - - image: {{ template "gloo.image" $image }} - imagePullPolicy: {{ $image.pullPolicy }} - name: access-logger - {{- include "gloo.containerSecurityContext" (dict "values" .Values.accessLogger.accessLoggerContainerSecurityContext "podSecurityStandards" .Values.global.podSecurityStandards "indent" 10 "globalSec" .Values.global.securitySettings) }} -{{- if .Values.accessLogger.resources }} - resources: -{{ toYaml .Values.accessLogger.resources | indent 12}} -{{- end}} - env: -{{- if .Values.accessLogger.customEnv }} -{{ toYaml .Values.accessLogger.customEnv | indent 10 }} -{{- end }} - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name -{{- if .Values.accessLogger.serviceName }} - - name: SERVICE_NAME - value: {{.Values.accessLogger.serviceName}} -{{- end }} {{/* if .Values.accessLogger.serviceName */}} - - name: SERVER_PORT - value: "{{ 
.Values.accessLogger.port }}" - ports: - - containerPort: {{ .Values.accessLogger.port }} - name: http - protocol: TCP -{{- if and $statsConfig.enabled $statsConfig.podMonitorEnabled }} - - name: http-monitoring - containerPort: 9091 -{{- end }} {{/* if $statsConfig.podMonitorEnabled */}} -{{- end }} {{/* if and .Values.gateway.enabled .Values.accessLogger.enabled */}} -{{- end }} {{/* define "accessLogger.deploymentSpec" */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.accessLogger.deployment -}} -{{- $kubeResourceOverride = .Values.accessLogger.deployment.kubeResourceOverride -}} -{{- end -}} -{{- include "gloo.util.merge" (list . $kubeResourceOverride "accessLogger.deploymentSpec") -}} \ No newline at end of file diff --git a/install/helm/gloo/templates/6-access-logger-service.yaml b/install/helm/gloo/templates/6-access-logger-service.yaml deleted file mode 100644 index 4edef410619..00000000000 --- a/install/helm/gloo/templates/6-access-logger-service.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- define "accessLogger.serviceSpec"}} -{{- if and .Values.gateway.enabled .Values.accessLogger.enabled }} -{{- $statsConfig := coalesce .Values.accessLogger.stats .Values.global.glooStats -}} -apiVersion: v1 -kind: Service -metadata: - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: gateway-proxy-access-logger - name: gateway-proxy-access-logger - namespace: {{ $.Release.Namespace }} -spec: - ports: - - port: {{ .Values.accessLogger.port }} - targetPort: {{ .Values.accessLogger.port }} - protocol: TCP - name: http -{{- if and $statsConfig.enabled $statsConfig.serviceMonitorEnabled }} - - name: http-monitoring - port: 9091 -{{- end }} - selector: - app: gloo - gloo: gateway-proxy-access-logger -{{- end }} {{/* if and .Values.gateway.enabled .Values.accessLogger.enabled */}} -{{- end }} {{/* define "accessLogger.serviceSpec" */}} - -{{/* Render template with yaml overrides */}} -{{- $kubeResourceOverride := dict -}} -{{- if .Values.accessLogger.service -}} -{{- $kubeResourceOverride = .Values.accessLogger.service.kubeResourceOverride -}} -{{- end -}} -{{- include "gloo.util.merge" (list . 
$kubeResourceOverride "accessLogger.serviceSpec") -}} \ No newline at end of file From 4ee961a845d0cb1424c18fd6feff708447df1ff3 Mon Sep 17 00:00:00 2001 From: Lawrence Gadban Date: Fri, 17 Jan 2025 12:47:39 -0600 Subject: [PATCH 4/4] fix helm chart --- install/helm/gloo/templates/18-settings.yaml | 9 ------- ...-namespace-clusterrolebinding-knative.yaml | 27 ------------------- 2 files changed, 36 deletions(-) delete mode 100644 install/helm/gloo/templates/25-namespace-clusterrolebinding-knative.yaml diff --git a/install/helm/gloo/templates/18-settings.yaml b/install/helm/gloo/templates/18-settings.yaml index 146dc743cbf..cb74d77a362 100644 --- a/install/helm/gloo/templates/18-settings.yaml +++ b/install/helm/gloo/templates/18-settings.yaml @@ -103,15 +103,6 @@ spec: refreshRate: 60s {{- if .Values.settings.linkerd }} linkerd: true -{{- end }} -{{- if .Values.settings.integrations.knative.enabled }} - knative: -{{- if (semverCompare "< 0.8.0" .Values.settings.integrations.knative.version ) }} - clusterIngressProxyAddress: "clusteringress-proxy.{{ .Release.Namespace }}.svc.{{ $.Values.k8s.clusterName}}" -{{- else }} - knativeExternalProxyAddress: "knative-external-proxy.{{ .Release.Namespace }}.svc.{{ $.Values.k8s.clusterName}}" - knativeInternalProxyAddress: "knative-internal-proxy.{{ .Release.Namespace }}.svc.{{ $.Values.k8s.clusterName}}" -{{- end }} {{- end }} gateway: diff --git a/install/helm/gloo/templates/25-namespace-clusterrolebinding-knative.yaml b/install/helm/gloo/templates/25-namespace-clusterrolebinding-knative.yaml deleted file mode 100644 index a53139cd397..00000000000 --- a/install/helm/gloo/templates/25-namespace-clusterrolebinding-knative.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if .Values.global.glooRbac.create }} - -{{- if .Values.settings.integrations.knative.enabled }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: gloo-role-binding-knative-{{ .Release.Namespace }} - labels: -{{ include "gloo.labels" . | indent 4}} - gloo: rbac -subjects: -- kind: ServiceAccount - name: default - namespace: {{ .Release.Namespace }} -- kind: ServiceAccount - name: discovery - namespace: {{ .Release.Namespace }} -- kind: ServiceAccount - name: gloo - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: gloo-role-knative{{ include "gloo.rbacNameSuffix" . }} - apiGroup: rbac.authorization.k8s.io -{{- end -}} - -{{- end -}}