diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..39c3605 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,36 @@ +{ + "name": "Kubebuilder DevContainer", + "image": "golang:1.23", + "features": { + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/git:1": {}, + "ghcr.io/devcontainers/features/common-utils": { + "installOhMyZsh": true, + "configureZshAsDefaultShell": true, + "installOhMyZshConfig": true, + "installZsh": true, + "upgradePackages": true + }, + "ghcr.io/dhoeric/features/act": {} + }, + "runArgs": [ + "--network=host" + ], + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.shell.linux": "/bin/bash" + }, + "extensions": [ + "ms-kubernetes-tools.vscode-kubernetes-tools", + "ms-azuretools.vscode-docker" + ] + } + }, + "onCreateCommand": "bash .devcontainer/post-install.sh", + "mounts": [ + "source=dind-datum-var-lib-docker,target=/var/lib/docker,type=volume", + "source=${localWorkspaceFolder}/../network-services-operator,target=/workspaces/network-services-operator,type=bind", + "source=${localWorkspaceFolder}/../workload-operator,target=/workspaces/workload-operator,type=bind" + ] +} diff --git a/.devcontainer/post-install.sh b/.devcontainer/post-install.sh new file mode 100644 index 0000000..265c43e --- /dev/null +++ b/.devcontainer/post-install.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -x + +curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 +chmod +x ./kind +mv ./kind /usr/local/bin/kind + +curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/amd64 +chmod +x kubebuilder +mv kubebuilder /usr/local/bin/ + +KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) +curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl" +chmod +x kubectl +mv kubectl /usr/local/bin/kubectl + +docker network create -d=bridge --subnet=172.19.0.0/24 kind + +kind version +kubebuilder version +docker --version +go version +kubectl version --client diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..a3aab7a --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. 
+bin/ diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..445e2be --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,23 @@ +name: Lint + +on: + push: + pull_request: + +jobs: + lint: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.23' + + - name: Run linter + uses: golangci/golangci-lint-action@v6 + with: + version: v1.61 diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml new file mode 100644 index 0000000..a86ebdd --- /dev/null +++ b/.github/workflows/test-e2e.yml @@ -0,0 +1,35 @@ +name: E2E Tests + +on: + push: + pull_request: + +jobs: + test-e2e: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.23' + + - name: Install the latest version of kind + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + + - name: Verify kind installation + run: kind version + + - name: Create kind cluster + run: kind create cluster + + - name: Running Test e2e + run: | + go mod tidy + make test-e2e diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..5781f67 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,23 @@ +name: Tests + +on: + push: + pull_request: + +jobs: + test: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.23' + + - name: Running Tests + run: | + go mod tidy + make test diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2b0c6e4 --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env + +bin/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..6b29746 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,47 @@ +run: + timeout: 5m + allow-parallel-runners: true + +issues: + # don't skip warning about doc comments + # don't exclude the default set of lint + exclude-use-default: false + # restore some of the defaults + # (fill in the rest as needed) + exclude-rules: + - path: "api/*" + linters: + - lll + - path: "internal/*" + linters: + - dupl + - lll +linters: + disable-all: true + enable: + - dupl + - errcheck + - copyloopvar + - ginkgolinter + - goconst + - gocyclo + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + +linters-settings: + revive: + rules: + - name: comment-spacings diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..db9b9ef --- /dev/null +++ b/Dockerfile @@ -0,0 +1,37 @@ +# Build the manager binary +FROM golang:1.23 AS builder +ARG TARGETOS +ARG 
TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +# COPY api/ api/ +COPY internal/ internal/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +ENV GOCACHE=/root/.cache/go-build +ENV GOTMPDIR=/root/.cache/go-build +RUN --mount=type=cache,target=/go/pkg/mod/ \ + --mount=type=cache,target="/root/.cache/go-build" \ + CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/LICENSE b/LICENSE index 0ad25db..bae94e1 100644 --- a/LICENSE +++ b/LICENSE @@ -633,8 +633,8 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation, either version 3 of the License, or + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, @@ -658,4 +658,4 @@ specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see -. +. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ea8ec78 --- /dev/null +++ b/Makefile @@ -0,0 +1,211 @@ +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.31.0 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. 
The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. +.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. +.PHONY: test-e2e +test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. + # @command -v kind >/dev/null 2>&1 || { \ + # echo "Kind is not installed. Please install Kind manually."; \ + # exit 1; \ + # } + # @kind get clusters | grep -q 'kind' || { \ + # echo "No Kind cluster is running. Please start a Kind cluster before running the e2e tests."; \ + # exit 1; \ + # } + # go test ./test/e2e/ -v -ginkgo.v + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go -health-probe-bind-address 0 \ + --kubeconfig=$(shell pwd)/infra.kubeconfig \ + --upstream-kubeconfig=$(shell pwd)/upstream.kubeconfig + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. 
+ $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name infra-provider-gcp-builder + $(CONTAINER_TOOL) buildx use infra-provider-gcp-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm infra-provider-gcp-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.4.3 +CONTROLLER_TOOLS_VERSION ?= v0.16.1 +ENVTEST_VERSION ?= release-0.19 +GOLANGCI_LINT_VERSION ?= v1.62.0 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. 
+$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef diff --git a/PROJECT b/PROJECT new file mode 100644 index 0000000..6777d39 --- /dev/null +++ b/PROJECT @@ -0,0 +1,10 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: datumapis.com +layout: +- go.kubebuilder.io/v4 +projectName: infra-provider-gcp +repo: go.datum.net/infra-provider-gcp +version: "3" diff --git a/README.md b/README.md new file mode 100644 index 0000000..e2839f4 --- /dev/null +++ b/README.md @@ -0,0 +1,104 @@ +# Datum GCP Infrastructure Provider + +This provider manages resources in GCP as a result of interpreting workload and +network related API entities defined by users. + +The primary APIs driving resource creation are defined in [workload-operator][workload-operator] +and [network-services-operator][network-services-operator]. + +[workload-operator]: https://github.com/datum-cloud/workload-operator +[network-services-operator]: https://github.com/datum-cloud/network-services-operator + +## Documentation + +Documentation will be available at [docs.datum.net](https://docs.datum.net/) +shortly. + +### Design Notes + +#### Instances + +Currently this provider leverages [GCP Managed Instance Groups][gcp-migs] to +manage instances within GCP. A future update will move toward more direct +instance control, as MIG resources and entities such as templates that are +required to use them take a considerably longer time to interact with than +direct VM instance control. + +[gcp-migs]: https://cloud.google.com/compute/docs/instance-groups#managed_instance_groups + +## Getting Started + +### Prerequisites + +- go version v1.23.0+ +- docker version 17.03+. +- kubectl version v1.31.0+. +- Access to a Kubernetes v1.31.0+ cluster. + +This provider makes use of the [GCP Config Connector][k8s-config-connector] +project to manage resources in GCP. It is expected that the config connector +and associated CRDs have been installed in the cluster. 
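As a rough illustration of that relationship (a minimal sketch, not code from this repository): the provider expresses desired GCP state as Config Connector objects, and Config Connector reconciles those objects into real GCP resources. The API group and kind below match the RBAC rules granted by this provider; the namespace, object name, and field value are placeholder assumptions, and the real reconcilers use the generated typed KCC clients rather than unstructured objects.

```go
// Minimal sketch: create a Config Connector ComputeNetwork so that Config
// Connector provisions the matching VPC network in GCP. Namespace, name, and
// the spec field value are placeholders; unstructured is used only to keep
// the example self-contained.
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createVPC(ctx context.Context, c client.Client) error {
	network := &unstructured.Unstructured{}
	network.SetAPIVersion("compute.cnrm.cloud.google.com/v1beta1")
	network.SetKind("ComputeNetwork")
	network.SetNamespace("infra-namespace-example") // placeholder namespace
	network.SetName("example-network")              // placeholder name
	// Custom-mode network; subnets would be managed as separate objects.
	if err := unstructured.SetNestedField(network.Object, false, "spec", "autoCreateSubnetworks"); err != nil {
		return err
	}
	return c.Create(ctx, network)
}
```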
+ +[k8s-config-connector]: https://github.com/GoogleCloudPlatform/k8s-config-connector + +### To Deploy on the cluster + +**Build and push your image to the location specified by `IMG`:** + +```sh +make docker-build docker-push IMG=/tmp:tag +``` + +**NOTE:** This image must be published to the registry you specified, and the +working environment must have permission to pull it. Make sure you have the +proper access to the registry if the above commands don't work. + +**Install the CRDs into the cluster:** + +```sh +make install +``` + +**Deploy the Manager to the cluster with the image specified by `IMG`:** + +```sh +make deploy IMG=/tmp:tag +``` + +> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin +privileges or be logged in as admin. + +**Create instances of your solution** +You can apply the samples (examples) from the config/samples directory: + +```sh +kubectl apply -k config/samples/ +``` + +>**NOTE**: Ensure that the samples have default values to test them out. + +### To Uninstall + +**Delete the instances (CRs) from the cluster:** + +```sh +kubectl delete -k config/samples/ +``` + +**Delete the APIs (CRDs) from the cluster:** + +```sh +make uninstall +``` + +**Undeploy the controller from the cluster:** + +```sh +make undeploy +``` + + + +**NOTE:** Run `make help` for more information on all potential `make` targets. + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 0000000..1c8ae10 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package main + +import ( + "crypto/tls" + "flag" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them.
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/tools/clientcmd" + + kcccomputev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" + kcciamv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/iam/v1beta1" + kccsecretmanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/secretmanager/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "go.datum.net/infra-provider-gcp/internal/controller" + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" + computev1alpha "go.datum.net/workload-operator/api/v1alpha" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(computev1alpha.AddToScheme(scheme)) + utilruntime.Must(networkingv1alpha.AddToScheme(scheme)) + + utilruntime.Must(kcccomputev1beta1.AddToScheme(scheme)) + utilruntime.Must(kcciamv1beta1.AddToScheme(scheme)) + utilruntime.Must(kcciamv1beta1.AddToScheme(scheme)) + utilruntime.Must(kccsecretmanagerv1beta1.AddToScheme(scheme)) + + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var leaderElectionNamespace string + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + var tlsOpts []func(*tls.Config) + var upstreamKubeconfig string + var locationClassName string + var infraNamespace string + + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.StringVar(&leaderElectionNamespace, "leader-elect-namespace", "", "The namespace to use for leader election.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + + // TODO(jreese) move to an approach that leverages a CRD to configure which + // locations this deployment of infra-provider-gcp will consider. Should + // include things like label or field selectors, anti-affinities for resources, + // etc. When this is done, we should investigate leveraging the `ByObject` + // setting of the informer cache to prevent populating the cache with entities + // which the operator does not need to receive. We'll likely need to lean + // into well known labels here, since a location class is defined on a location, + // which entities only reference and do not embed. 
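The TODO above describes scoping the informer cache; the sketch below (illustrative only, not part of this change) shows one possible shape using controller-runtime's `ByObject` cache options. The label key is a hypothetical well-known label, and the `WorkloadDeployment` type is assumed from the reconciler and RBAC names used elsewhere in this change.

```go
// Minimal sketch of the cache scoping described in the TODO above; not part
// of this change. The returned options would be passed as the Cache field of
// ctrl.Options when constructing the manager.
package main

import (
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"

	computev1alpha "go.datum.net/workload-operator/api/v1alpha"
)

// scopedCacheOptions restricts which WorkloadDeployment objects the manager
// caches, so objects for locations this deployment ignores are never stored.
func scopedCacheOptions(locationClassName string) cache.Options {
	return cache.Options{
		ByObject: map[client.Object]cache.ByObject{
			&computev1alpha.WorkloadDeployment{}: {
				// Hypothetical well-known label carrying the location class.
				Label: labels.SelectorFromSet(labels.Set{
					"compute.datumapis.com/location-class": locationClassName,
				}),
			},
		},
	}
}
```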
+ flag.StringVar( + &locationClassName, + "location-class", + "self-managed", + "Only consider resources attached to locations with the specified location class.", + ) + + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + + flag.StringVar(&upstreamKubeconfig, "upstream-kubeconfig", "", "absolute path to the kubeconfig "+ + "file for the API server that is the source of truth for datum entities") + + flag.StringVar(&infraNamespace, "infra-namespace", "", "The namespace which resources for managing GCP entities "+ + "should be created in.") + + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: tlsOpts, + }) + + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + // not provided, self-signed certificates will be generated by default. This option is not recommended for + // production environments as self-signed certificates do not offer the same level of trust and security + // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing + // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName + // to provide certificates, ensuring the server communicates using trusted and secure certificates. + TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. 
More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + if len(upstreamKubeconfig) == 0 { + setupLog.Info("must provide --upstream-kubeconfig") + os.Exit(1) + } + + if len(infraNamespace) == 0 { + setupLog.Info("must provide --infra-namespace") + os.Exit(1) + } + + upstreamClusterConfig, err := clientcmd.BuildConfigFromFlags("", upstreamKubeconfig) + if err != nil { + setupLog.Error(err, "unable to load control plane kubeconfig") + os.Exit(1) + } + + mgr, err := ctrl.NewManager(upstreamClusterConfig, ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "fddf20f1.datumapis.com", + LeaderElectionNamespace: leaderElectionNamespace, + + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // We consider the cluster that the operator has been deployed in to be the + // target cluster for infrastructure components. 
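+ // The manager above is built against the upstream cluster (--upstream-kubeconfig), + // which is the source of truth for Datum entities; the cluster.Cluster created + // next wraps the config returned by ctrl.GetConfigOrDie(), i.e. the cluster the + // operator runs in. Adding it to the manager below ties its cache lifecycle to + // the manager, and its client is handed to the reconcilers as InfraClient for + // managing Config Connector resources.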
+ infraCluster, err := cluster.New(ctrl.GetConfigOrDie(), func(o *cluster.Options) { + o.Scheme = scheme + }) + if err != nil { + setupLog.Error(err, "failed to construct cluster") + os.Exit(1) + } + + if err := mgr.Add(infraCluster); err != nil { + setupLog.Error(err, "failed to add cluster to manager") + os.Exit(1) + } + + // TODO(jreese) rework the gateway controller when we have a higher level + // orchestrator from network-services-operator that schedules "sub gateways" + // onto clusters, similar to Workloads -> WorkloadDeployments and + // Networks -> NetworkContexts + // + // if err = (&controller.WorkloadGatewayReconciler{ + // Client: mgr.GetClient(), + // InfraClient: infraCluster.GetClient(), + // Scheme: mgr.GetScheme(), + // GCPProject: gcpProject, + // }).SetupWithManager(mgr, infraCluster); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler") + // os.Exit(1) + // } + + if err = (&controller.WorkloadDeploymentReconciler{ + Client: mgr.GetClient(), + InfraClient: infraCluster.GetClient(), + Scheme: mgr.GetScheme(), + LocationClassName: locationClassName, + InfraClusterNamespaceName: infraNamespace, + }).SetupWithManager(mgr, infraCluster); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "WorkloadDeploymentReconciler") + os.Exit(1) + } + + if err = (&controller.InstanceDiscoveryReconciler{ + Client: mgr.GetClient(), + InfraClient: infraCluster.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, infraCluster); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "InstanceDiscoveryReconciler") + os.Exit(1) + } + + if err = (&controller.NetworkContextReconciler{ + Client: mgr.GetClient(), + InfraClient: infraCluster.GetClient(), + Scheme: mgr.GetScheme(), + LocationClassName: locationClassName, + InfraClusterNamespaceName: infraNamespace, + }).SetupWithManager(mgr, infraCluster); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "NetworkContextReconciler") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 0000000..8ff97cd --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,151 @@ +# Adds namespace to all resources. +namespace: infra-provider-gcp-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: infra-provider-gcp- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +#- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 
+#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- path: webhookcainjection_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - source: # Add cert-manager annotation to the webhook Service +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml new file mode 100644 index 0000000..2aaef65 --- /dev/null +++ b/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/config/default/metrics_service.yaml b/config/default/metrics_service.yaml new file mode 100644 index 0000000..fbd1d0a --- /dev/null +++ b/config/default/metrics_service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 0000000..5c5f0b8 --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 0000000..cecd468 --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 
+ # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/network-policy/allow-metrics-traffic.yaml b/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 0000000..3b1f2c9 --- /dev/null +++ b/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,26 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gathering data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/config/network-policy/kustomization.yaml b/config/network-policy/kustomization.yaml new file mode 100644 index 0000000..ec0fb5e --- /dev/null +++ b/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..ed13716 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 0000000..8d35230 --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,30 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification. This poses a significant security risk by making the system vulnerable to + # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between + # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, + # compromising the integrity and confidentiality of the information. 
+ # Please use the following options for secure configurations: + # caFile: /etc/metrics-certs/ca.crt + # certFile: /etc/metrics-certs/tls.crt + # keyFile: /etc/metrics-certs/tls.key + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 0000000..5619aa0 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,20 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. +# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..030aba6 --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..63597a8 --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/metrics_auth_role.yaml b/config/rbac/metrics_auth_role.yaml new file mode 100644 index 0000000..32d2e4e --- /dev/null +++ b/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/metrics_auth_role_binding.yaml b/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 0000000..e775d67 --- /dev/null +++ b/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: 
metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/metrics_reader_role.yaml b/config/rbac/metrics_reader_role.yaml new file mode 100644 index 0000000..51a75db --- /dev/null +++ b/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 0000000..823dcca --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,130 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - compute.cnrm.cloud.google.com + resources: + - computeaddresses + - computebackendservices + - computefirewalls + - computeforwardingrules + - computehealthchecks + - computeinstancegroupmanagers + - computeinstancetemplates + - computenetworks + - computesubnetworks + - computetargettcpproxies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - compute.cnrm.cloud.google.com + resources: + - computeaddresses/status + - computebackendservices/status + - computefirewalls/status + - computeforwardingrules/status + - computehealthchecks/status + - computeinstancegroupmanager/status + - computeinstancegroupmanagers/status + - computeinstancetemplates/status + - computenetworks/status + - computesubnetworks/status + - computetargettcpproxies/status + verbs: + - get +- apiGroups: + - compute.cnrm.cloud.google.com + resources: + - computeinstancegroupmanager + verbs: + - get + - list + - watch +- apiGroups: + - compute.datumapis.com + resources: + - networkcontexts + verbs: + - get + - list + - watch +- apiGroups: + - compute.datumapis.com + resources: + - networkcontexts/finalizers + - workloaddeployments/finalizers + - workloads/finalizers + verbs: + - update +- apiGroups: + - compute.datumapis.com + resources: + - networkcontexts/status + - workloaddeployments/status + - workloads/status + verbs: + - get + - patch + - update +- apiGroups: + - compute.datumapis.com + resources: + - workloaddeployments + - workloads + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - iam.cnrm.cloud.google.com + resources: + - iampolicies + - iamserviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - iam.cnrm.cloud.google.com + resources: + - iampolicies/status + - iamserviceaccounts/status + verbs: + - get +- apiGroups: + - secretmanager.cnrm.cloud.google.com + resources: + - secretmanagersecrets + - secretmanagersecretversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - secretmanager.cnrm.cloud.google.com + resources: + - secretmanagersecrets/status + - secretmanagersecretversions/status + verbs: + - get diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 0000000..9b631c2 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role 
+subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 0000000..209a331 --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: infra-provider-gcp + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..de7953c --- /dev/null +++ b/go.mod @@ -0,0 +1,116 @@ +module go.datum.net/infra-provider-gcp + +go 1.23.0 + +require ( + cloud.google.com/go/compute v1.29.0 + github.com/GoogleCloudPlatform/k8s-config-connector v1.125.0 + github.com/go-logr/logr v1.4.2 + github.com/googleapis/gax-go/v2 v2.14.0 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 + github.com/stretchr/testify v1.9.0 + go.datum.net/network-services-operator v0.0.0-20241205161241-d19fee49f0b7 + go.datum.net/workload-operator v0.0.0-20241205164213-161fb5150fb8 + google.golang.org/protobuf v1.35.1 + gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 + sigs.k8s.io/controller-runtime v0.19.1 + sigs.k8s.io/gateway-api v1.2.0 +) + +require ( + cloud.google.com/go/auth v0.10.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.20.1 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.24.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/api v0.205.0 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.67.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/apiextensions-apiserver v0.31.1 // indirect + k8s.io/apiserver v0.31.1 // indirect + k8s.io/component-base v0.31.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..89cad71 --- /dev/null +++ b/go.sum @@ -0,0 +1,345 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.10.1 h1:TnK46qldSfHWt2a0b/hciaiVJsmDXWy9FqyUan0uYiI= +cloud.google.com/go/auth v0.10.1/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute v1.29.0 h1:Lph6d8oPi38NHkOr6S55Nus/Pbbcp37m/J0ohgKAefs= +cloud.google.com/go/compute v1.29.0/go.mod 
h1:HFlsDurE5DpQZClAGf/cYh+gxssMhBxBovZDYkEn/Og= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/GoogleCloudPlatform/k8s-config-connector v1.125.0 h1:oGWpJITjVgJ+ocATyf6BedTwK5tJ5KuOe+yhOyQ+oFU= +github.com/GoogleCloudPlatform/k8s-config-connector v1.125.0/go.mod h1:RX/TZs88Bbp3verrymNfRUnrNUYx9goLY5dMh0yqLjc= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= 
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= +github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.datum.net/network-services-operator v0.0.0-20241205161241-d19fee49f0b7 h1:kqaIj3hl5q6fN1S4roMRinMVar7Iu2VrqHP7eAnT680= +go.datum.net/network-services-operator v0.0.0-20241205161241-d19fee49f0b7/go.mod h1:4D2oDDFw2opJlgkt3xqnE6Sjqzj60bpMYjuhbTYX6ps= +go.datum.net/workload-operator v0.0.0-20241205164213-161fb5150fb8 h1:QCmAX2lwNnoBKMTcuBAmv2ItfDsWgcL/8N7gPW/L6/g= +go.datum.net/workload-operator v0.0.0-20241205164213-161fb5150fb8/go.mod h1:0zt3wEOy4gLYdOPvErnxLkcE+T2DsjpPmXGANmiUTdQ= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 
h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.205.0 
h1:LFaxkAIpDb/GsrWV20dMMo5MR0h8UARTbn24LmD+0Pg= +google.golang.org/api v0.205.0/go.mod h1:NrK1EMqO8Xk6l6QwRAmrXXg2v6dzukhlOyvkYtnvUuc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 
h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c= +k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= +k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 h1:Q8Z7VlGhcJgBHJHYugJ/K/7iB8a2eSxCyxdVjJp+lLY= +k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= +sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/gateway-api v1.2.0 h1:LrToiFwtqKTKZcZtoQPTuo3FxhrrhTgzQG0Te+YGSo8= +sigs.k8s.io/gateway-api v1.2.0/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 
v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..ea8ae64 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1 @@ +// SPDX-License-Identifier: AGPL-3.0-only diff --git a/internal/controller/annotations.go b/internal/controller/annotations.go new file mode 100644 index 0000000..fe49549 --- /dev/null +++ b/internal/controller/annotations.go @@ -0,0 +1,5 @@ +package controller + +const ( + GCPProjectAnnotation = "cnrm.cloud.google.com/project-id" +) diff --git a/internal/controller/cloudinit/cloudconfig.go b/internal/controller/cloudinit/cloudconfig.go new file mode 100644 index 0000000..e47ae50 --- /dev/null +++ b/internal/controller/cloudinit/cloudconfig.go @@ -0,0 +1,30 @@ +package cloudinit + +import "gopkg.in/yaml.v3" + +type CloudConfig struct { + Hostname string `yaml:"hostname,omitempty"` + PreserveHostname *bool `yaml:"preserve_hostname,omitempty"` + RunCmd []string `yaml:"runcmd,omitempty"` + WriteFiles []WriteFile `yaml:"write_files,omitempty"` + FSSetup []FSSetup `yaml:"fs_setup,omitempty"` + Mounts []string `yaml:"mounts,omitempty"` +} + +func (c CloudConfig) Generate() ([]byte, error) { + return yaml.Marshal(c) +} + +type WriteFile struct { + Encoding string `yaml:"encoding"` + Content string `yaml:"content"` + Owner string `yaml:"owner"` + Path string `yaml:"path"` + Permissions string `yaml:"permissions"` +} + +type FSSetup struct { + Label string `yaml:"label"` + Filesystem string `yaml:"filesystem"` + Device string `yaml:"device"` +} diff --git a/internal/controller/cloudinit/populate_secrets.py b/internal/controller/cloudinit/populate_secrets.py new file mode 100644 index 0000000..9dee513 --- /dev/null +++ b/internal/controller/cloudinit/populate_secrets.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 + +import os +import sys +import requests +import base64 +import json + +def main(secret_manager_url): + # Fetch the access token + metadata_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" + headers = {"Metadata-Flavor": "Google"} + token_response = requests.get(metadata_url, headers=headers) + access_token = token_response.json().get("access_token") + + # Fetch the secret payload from Secret Manager + secret_headers = { + "Authorization": f"Bearer {access_token}", + "Content-Type": "application/json" + } + secrets_response = requests.get(secret_manager_url, headers=secret_headers) + + # Parse and decode the secret payload + secrets_data = secrets_response.json().get("payload", {}).get("data") + if secrets_data: + secrets_json = json.loads(base64.b64decode(secrets_data).decode()) + + # Iterate through the top-level keys and values in the secrets JSON + for key, nested_data in secrets_json.items(): + # Create a directory for each top-level key + os.makedirs(f"/etc/secrets/content/{key}", exist_ok=True) + + # Iterate over nested child keys and values, decode if necessary, and save each to a file + for child_key, child_value in nested_data.items(): + decoded_value = base64.b64decode(child_value).decode() # decode the base64-encoded value + with open(f"/etc/secrets/content/{key}/{child_key}", "w") as file: + file.write(decoded_value) + else: + print("Error: No data field found in payload.") + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: script.py ") 
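+        # The script's single argument is the Secret Manager "access" URL for
+        # the secret version to read, e.g. (placeholders are illustrative):
+        # https://secretmanager.googleapis.com/v1/projects/<project>/secrets/<secret>/versions/latest:access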
+ sys.exit(1) + + secret_manager_url = sys.argv[1] + main(secret_manager_url) diff --git a/internal/controller/instancediscovery_controller.go b/internal/controller/instancediscovery_controller.go new file mode 100644 index 0000000..ef9f1f6 --- /dev/null +++ b/internal/controller/instancediscovery_controller.go @@ -0,0 +1,424 @@ +package controller + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + gcpcomputev1 "cloud.google.com/go/compute/apiv1" + "cloud.google.com/go/compute/apiv1/computepb" + kcccomputev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" + kcccomputev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + "github.com/go-logr/logr" + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/protobuf/proto" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/finalizer" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/source" + + "go.datum.net/infra-provider-gcp/internal/controller/k8sconfigconnector" + "go.datum.net/infra-provider-gcp/internal/crossclusterutil" + computev1alpha "go.datum.net/workload-operator/api/v1alpha" +) + +// InstanceDiscoveryReconciler reconciles a Workload object and processes any +// gateways defined. +type InstanceDiscoveryReconciler struct { + client.Client + InfraClient client.Client + Scheme *runtime.Scheme + + finalizers finalizer.Finalizers + instancesClient *gcpcomputev1.InstancesClient + instanceTemplatesClient *gcpcomputev1.InstanceTemplatesClient + migClient *gcpcomputev1.InstanceGroupManagersClient +} + +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeinstancegroupmanager,verbs=get;list;watch +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeinstancegroupmanager/status,verbs=get + +func (r *InstanceDiscoveryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Work with the unstructured form of an instance group manager, as the generated + // types are not aligned with the actual CRD. Particularly the `targetSize` + // field. 
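+	//
+	// For illustration only, a field such as spec.targetSize can be read via
+	// the unstructured helpers rather than the generated types, e.g.:
+	//
+	//	targetSize, found, err := unstructured.NestedInt64(instanceGroupManager.Object, "spec", "targetSize")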
+ + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + + if err := r.InfraClient.Get(ctx, req.NamespacedName, &instanceGroupManager); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + finalizationResult, err := r.finalizers.Finalize(ctx, &instanceGroupManager) + if err != nil { + if v, ok := err.(kerrors.Aggregate); ok && v.Is(resourceIsDeleting) { + logger.Info("resources are still deleting, requeuing") + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } else { + return ctrl.Result{}, fmt.Errorf("failed to finalize: %w", err) + } + } + if finalizationResult.Updated { + if err = r.InfraClient.Update(ctx, &instanceGroupManager); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update based on finalization result: %w", err) + } + return ctrl.Result{}, nil + } + + var reconcileResult ctrl.Result + + var isDeleting bool + if t := instanceGroupManager.GetDeletionTimestamp(); !t.IsZero() { + isDeleting = true + reconcileResult.RequeueAfter = 10 * time.Second + } + + // Very ugly workaround for not being able to use the typed instance group + // manager. + conditions, err := extractUnstructuredConditions(instanceGroupManager.Object) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed extracting instance group manager conditions: %w", err) + } + + if !isDeleting && !k8sconfigconnector.IsStatusConditionTrue(conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("instance group manager not ready yet") + return ctrl.Result{}, nil + } + + logger.Info("reconciling instance group manager") + defer logger.Info("reconcile complete") + + var workloadDeployment computev1alpha.WorkloadDeployment + if !isDeleting { + w, err := r.getWorkloadDeploymentForInstanceGroupManager(ctx, instanceGroupManager) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching workload deployment: %w", err) + } + workloadDeployment = *w + } + + gcpProject, ok, err := unstructured.NestedString(instanceGroupManager.Object, "spec", "projectRef", "external") + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed reading project from instance group manager: %w", err) + } else if !ok { + return ctrl.Result{}, fmt.Errorf("empty project found on instance group manager") + } + + gcpZone, ok, err := unstructured.NestedString(instanceGroupManager.Object, "spec", "location") + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed reading zone from instance group manager: %w", err) + } else if !ok { + return ctrl.Result{}, fmt.Errorf("empty location found on instance group manager") + } + + // TODO(jreese) shortcut reconciliation based on stability and last observed + // info, status.isStable, etc. + // + // TODO(jreese) see if we can use the resource export functionality to obtain + // a yaml manifest that can be applied to create a ComputeInstance that will + // acquire the managed instance. This way, this reconciler can be responsible + // only for ensuring the ComputeInstances exist, and another reconciler can + // watch those in order to reconcile the Datum Instance representation. If + // we do that, we'll want to make sure to set the `abandon` annotation value. 
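+	// List the instances currently managed by this instance group manager so
+	// each one can be mirrored as a Datum Instance. The All() iterator yields
+	// (instance, error) pairs, which is why the error is checked on every
+	// iteration below.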
+ + listRequest := &computepb.ListManagedInstancesInstanceGroupManagersRequest{ + Project: gcpProject, + Zone: gcpZone, + InstanceGroupManager: req.Name, + } + + // TODO(jreese) delete instances that no longer show up in the managed list + for managedInstance, err := range r.migClient.ListManagedInstances(ctx, listRequest).All() { + if err != nil { + if e, ok := err.(*apierror.APIError); ok && e.HTTPCode() == http.StatusNotFound { + break + } + return ctrl.Result{}, fmt.Errorf("failed listing managed instances: %w", err) + } + + result, err := r.reconcileDatumInstance( + ctx, + logger, + gcpProject, + gcpZone, + isDeleting, + &workloadDeployment, + managedInstance, + ) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed reconciling datum instance: %w", err) + } + + if result.RequeueAfter > 0 { + return result, nil + } + } + + // TODO(jreese) enable periodic reconcile + // TODO(jreese) 30 seconds is aggressive to do all the time, consider having + // this configurable similar to what KCC supports, and probably context based + // requeue after based on condition status transition times. + // return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + return reconcileResult, nil +} + +func (r *InstanceDiscoveryReconciler) getWorkloadDeploymentForInstanceGroupManager( + ctx context.Context, + instanceGroupManager unstructured.Unstructured, +) (*computev1alpha.WorkloadDeployment, error) { + labels := instanceGroupManager.GetLabels() + var workloadDeployment computev1alpha.WorkloadDeployment + if labels[crossclusterutil.UpstreamOwnerKindLabel] != "WorkloadDeployment" { + return nil, fmt.Errorf("failed to find WorkloadDeployment owner for ComputeInstanceGroupManager") + } + + workloadDeploymentObjectKey := client.ObjectKey{ + Namespace: labels[crossclusterutil.UpstreamOwnerNamespaceLabel], + Name: labels[crossclusterutil.UpstreamOwnerNameLabel], + } + if err := r.Client.Get(ctx, workloadDeploymentObjectKey, &workloadDeployment); err != nil { + return nil, fmt.Errorf("failed to get workload deployment: %w", err) + } + + return &workloadDeployment, nil +} + +func (r *InstanceDiscoveryReconciler) reconcileDatumInstance( + ctx context.Context, + logger logr.Logger, + gcpProject string, + gcpZone string, + isDeleting bool, + workloadDeployment *computev1alpha.WorkloadDeployment, + managedInstance *computepb.ManagedInstance, +) (ctrl.Result, error) { + + getInstanceReq := &computepb.GetInstanceRequest{ + Project: gcpProject, + Zone: gcpZone, + Instance: *managedInstance.Name, + } + + instance, err := r.instancesClient.Get(ctx, getInstanceReq) + + if err != nil { + if e, ok := err.(*apierror.APIError); ok && e.HTTPCode() == http.StatusNotFound { + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } + + return ctrl.Result{}, fmt.Errorf("failed fetching gcp instance for managed instance: %w", err) + } + + // Transform the instance name to what's expected based off of the workload + // deployment. 
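+	//
+	// For example (names are illustrative), a MIG-created instance named
+	// "deployment-abc123-xk9f" keeps only its trailing random suffix, yielding
+	// a Datum instance named "<workload-deployment-name>-xk9f".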
+ + if instance.Name == nil { + return ctrl.Result{}, fmt.Errorf("GCP instance name is nil, expected a value") + } + + datumInstanceName := fmt.Sprintf("%s-%s", workloadDeployment.Name, (*instance.Name)[strings.LastIndex(*instance.Name, "-")+1:]) + + datumInstance := &computev1alpha.Instance{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workloadDeployment.Namespace, + Name: datumInstanceName, + }, + } + + if !isDeleting { + result, err := controllerutil.CreateOrUpdate(ctx, r.Client, datumInstance, func() error { + if datumInstance.CreationTimestamp.IsZero() { + logger.Info("creating datum instance") + } else { + logger.Info("updating datum instance") + } + + if err := controllerutil.SetControllerReference(workloadDeployment, datumInstance, r.Scheme); err != nil { + return fmt.Errorf("failed to set controller on instance: %w", err) + } + + // TODO(jreese) track a workload deployment revision that aligns with the + // instance template 1:1, and have a controller on that be responsible for + // creating the instance template. The deployment controller would then + // point the MIG to the latest version. + // + // TODO(jreese) this will be required for updates to instances by a new + // template to be communicated correctly. + + datumInstance.Spec = workloadDeployment.Spec.Template.Spec + return nil + }) + + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed reconciling datum instance: %w", err) + } + + if result != controllerutil.OperationResultNone { + logger.Info("datum instance mutated", "result", result) + } + } else { + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(datumInstance), datumInstance); err != nil { + if apierrors.IsNotFound(err) { + // This would occur during deletion at the moment. + return ctrl.Result{}, nil + } + return ctrl.Result{}, fmt.Errorf("failed fetching datum instance: %w", err) + } + } + + var instanceStatus string + if instance.Status != nil { + instanceStatus = *instance.Status + } + + var statusUpdated bool + + datumNetworkInterfaces := make([]computev1alpha.InstanceNetworkInterfaceStatus, 0, len(instance.NetworkInterfaces)) + + for _, networkInterface := range instance.NetworkInterfaces { + datumNetworkInterfaceStatus := computev1alpha.InstanceNetworkInterfaceStatus{} + + if networkInterface.NetworkIP != nil { + datumNetworkInterfaceStatus.Assignments.NetworkIP = proto.String(*networkInterface.NetworkIP) + } + + for _, accessConfig := range networkInterface.AccessConfigs { + if *accessConfig.Type == "ONE_TO_ONE_NAT" && accessConfig.NatIP != nil { + datumNetworkInterfaceStatus.Assignments.ExternalIP = proto.String(*accessConfig.NatIP) + } + } + + datumNetworkInterfaces = append(datumNetworkInterfaces, datumNetworkInterfaceStatus) + } + + if !equality.Semantic.DeepEqual(datumInstance.Status.NetworkInterfaces, datumNetworkInterfaces) { + statusUpdated = true + datumInstance.Status.NetworkInterfaces = datumNetworkInterfaces + } + + var reconcileResult ctrl.Result + switch instanceStatus { + case "RUNNING": + changed := apimeta.SetStatusCondition(&datumInstance.Status.Conditions, metav1.Condition{ + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "InstanceIsRunning", + ObservedGeneration: datumInstance.Generation, + Message: "GCP Instance status is RUNNING", + }) + if changed { + statusUpdated = true + } + default: + reconcileResult.RequeueAfter = 10 * time.Second + + changed := apimeta.SetStatusCondition(&datumInstance.Status.Conditions, metav1.Condition{ + Type: "Available", + Status: metav1.ConditionFalse, + Reason: 
fmt.Sprintf("InstanceIs%s%s", string(instanceStatus[0]), strings.ToLower(instanceStatus[1:])), + ObservedGeneration: datumInstance.Generation, + Message: fmt.Sprintf("GCP Instance status is %s", instanceStatus), + }) + if changed { + statusUpdated = true + } + } + + if statusUpdated { + if err := r.Client.Status().Update(ctx, datumInstance); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update datum instance status: %w", err) + } + } + return reconcileResult, nil +} + +func (r *InstanceDiscoveryReconciler) Finalize( + ctx context.Context, + obj client.Object, +) (finalizer.Result, error) { + + return finalizer.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *InstanceDiscoveryReconciler) SetupWithManager(mgr ctrl.Manager, infraCluster cluster.Cluster) error { + + instancesClient, err := gcpcomputev1.NewInstancesRESTClient(context.Background()) + if err != nil { + return fmt.Errorf("failed to create instance group managers client: %w", err) + } + r.instancesClient = instancesClient + + instanceTemplatesClient, err := gcpcomputev1.NewInstanceTemplatesRESTClient(context.Background()) + if err != nil { + return fmt.Errorf("failed to create instance group managers client: %w", err) + } + r.instanceTemplatesClient = instanceTemplatesClient + + instanceGroupManagersClient, err := gcpcomputev1.NewInstanceGroupManagersRESTClient(context.Background()) + if err != nil { + return fmt.Errorf("failed to create instance group managers client: %w", err) + } + r.migClient = instanceGroupManagersClient + + r.finalizers = finalizer.NewFinalizers() + if err := r.finalizers.Register(gcpWorkloadFinalizer, r); err != nil { + return fmt.Errorf("failed to register finalizer: %w", err) + } + + // Watch the unstructured form of an instance group manager, as the generated + // types are not aligned with the actual CRD. + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + + return ctrl.NewControllerManagedBy(mgr). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &instanceGroupManager, + &handler.TypedEnqueueRequestForObject[*unstructured.Unstructured]{}, + )). + Named("instancediscovery"). 
+ Complete(r) +} + +func extractUnstructuredConditions( + obj map[string]interface{}, +) ([]kcccomputev1alpha1.Condition, error) { + conditions, ok, _ := unstructured.NestedSlice(obj, "status", "conditions") + if !ok { + return nil, nil + } + + wrappedConditions := map[string]interface{}{ + "conditions": conditions, + } + + var typedConditions struct { + Conditions []kcccomputev1alpha1.Condition `json:"conditions"` + } + + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(wrappedConditions, &typedConditions); err != nil { + return nil, fmt.Errorf("failed converting unstructured conditions: %w", err) + } + + return typedConditions.Conditions, nil +} diff --git a/internal/controller/k8sconfigconnector/conditions.go b/internal/controller/k8sconfigconnector/conditions.go new file mode 100644 index 0000000..de9101e --- /dev/null +++ b/internal/controller/k8sconfigconnector/conditions.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package k8sconfigconnector + +import ( + gcpcomputev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +// IsStatusConditionTrue returns true when the conditionType is present and set to `gcpcomputev1alpha1.ConditionTrue` +func IsStatusConditionTrue(conditions []gcpcomputev1alpha1.Condition, conditionType string) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsStatusConditionPresentAndEqual(conditions []gcpcomputev1alpha1.Condition, conditionType string, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/internal/controller/networkcontext_controller.go b/internal/controller/networkcontext_controller.go new file mode 100644 index 0000000..3b45973 --- /dev/null +++ b/internal/controller/networkcontext_controller.go @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + "fmt" + + kcccomputev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" + kcccomputev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + "google.golang.org/protobuf/proto" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/finalizer" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/source" + + "go.datum.net/infra-provider-gcp/internal/controller/k8sconfigconnector" + "go.datum.net/infra-provider-gcp/internal/crossclusterutil" + "go.datum.net/infra-provider-gcp/internal/locationutil" + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// NetworkContextReconciler reconciles a NetworkContext and ensures that a GCP +// ComputeNetwork is created to represent the context within GCP. 
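+// The ComputeNetwork is created in the infra cluster namespace identified by
+// InfraClusterNamespaceName, and its readiness is mirrored back into the
+// NetworkContext's status conditions.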
+type NetworkContextReconciler struct { + client.Client + InfraClient client.Client + Scheme *runtime.Scheme + LocationClassName string + InfraClusterNamespaceName string + + finalizers finalizer.Finalizers +} + +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=networkcontexts,verbs=get;list;watch +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=networkcontexts/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=networkcontexts/finalizers,verbs=update + +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computenetworks,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computenetworks/status,verbs=get + +func (r *NetworkContextReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, err error) { + logger := log.FromContext(ctx) + + var networkContext networkingv1alpha.NetworkContext + if err := r.Client.Get(ctx, req.NamespacedName, &networkContext); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + location, shouldProcess, err := locationutil.GetLocation(ctx, r.Client, networkContext.Spec.Location, r.LocationClassName) + if err != nil { + return ctrl.Result{}, err + } else if !shouldProcess { + return ctrl.Result{}, nil + } + + logger.Info("reconciling network context") + defer logger.Info("reconcile complete") + + finalizationResult, err := r.finalizers.Finalize(ctx, &networkContext) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to finalize: %w", err) + } + if finalizationResult.Updated { + if err = r.Client.Update(ctx, &networkContext); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update based on finalization result: %w", err) + } + return ctrl.Result{}, nil + } + + if !networkContext.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + readyCondition := metav1.Condition{ + Type: networkingv1alpha.NetworkBindingReady, + Status: metav1.ConditionFalse, + Reason: "Unknown", + ObservedGeneration: networkContext.Generation, + Message: "Unknown state", + } + + defer func() { + if err != nil { + // Don't update the status if errors are encountered + return + } + statusChanged := apimeta.SetStatusCondition(&networkContext.Status.Conditions, readyCondition) + + if statusChanged { + err = r.Client.Status().Update(ctx, &networkContext) + } + }() + + var network networkingv1alpha.Network + networkObjectKey := client.ObjectKey{ + Namespace: networkContext.Namespace, + Name: networkContext.Spec.Network.Name, + } + if err := r.Client.Get(ctx, networkObjectKey, &network); err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching network: %w", err) + } + + kccNetworkName := fmt.Sprintf("network-%s", networkContext.UID) + + var kccNetwork kcccomputev1beta1.ComputeNetwork + kccNetworkObjectKey := client.ObjectKey{ + Namespace: r.InfraClusterNamespaceName, + Name: kccNetworkName, + } + if err := r.InfraClient.Get(ctx, kccNetworkObjectKey, &kccNetwork); client.IgnoreNotFound(err) != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching gcp network: %w", err) + } + + if kccNetwork.CreationTimestamp.IsZero() { + logger.Info("creating GCP network") + + kccNetwork = kcccomputev1beta1.ComputeNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kccNetworkObjectKey.Namespace, + Name: kccNetworkObjectKey.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: location.Spec.Provider.GCP.ProjectID, + }, + }, + Spec: 
kcccomputev1beta1.ComputeNetworkSpec{ + Mtu: proto.Int64(int64(network.Spec.MTU)), + }, + } + + kccNetwork.Spec.AutoCreateSubnetworks = proto.Bool(false) + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, &networkContext, &kccNetwork, r.Scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller on network context: %w", err) + } + + if err := r.InfraClient.Create(ctx, &kccNetwork); err != nil { + return ctrl.Result{}, fmt.Errorf("failed creating gcp network: %w", err) + } + } + + if !k8sconfigconnector.IsStatusConditionTrue(kccNetwork.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("GCP network not ready yet") + readyCondition.Reason = "ProviderNetworkNotReady" + readyCondition.Message = "Network is not ready." + return ctrl.Result{}, nil + } + + readyCondition.Status = metav1.ConditionTrue + readyCondition.Reason = "NetworkReady" + readyCondition.Message = "Network is ready." + + return ctrl.Result{}, nil +} + +func (r *NetworkContextReconciler) Finalize( + ctx context.Context, + obj client.Object, +) (finalizer.Result, error) { + + if err := crossclusterutil.DeleteAnchorForObject(ctx, r.Client, r.InfraClient, obj, r.InfraClusterNamespaceName); err != nil { + return finalizer.Result{}, fmt.Errorf("failed deleting network context anchor: %w", err) + } + + return finalizer.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NetworkContextReconciler) SetupWithManager(mgr ctrl.Manager, infraCluster cluster.Cluster) error { + r.finalizers = finalizer.NewFinalizers() + if err := r.finalizers.Register(gcpInfraFinalizer, r); err != nil { + return fmt.Errorf("failed to register finalizer: %w", err) + } + + return ctrl.NewControllerManagedBy(mgr). + For(&networkingv1alpha.NetworkContext{}). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeNetwork{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeNetwork](mgr.GetScheme(), &networkingv1alpha.NetworkContext{}), + )). + Complete(r) +} diff --git a/internal/controller/result.go b/internal/controller/result.go new file mode 100644 index 0000000..5b4d81a --- /dev/null +++ b/internal/controller/result.go @@ -0,0 +1,24 @@ +package controller + +import ctrl "sigs.k8s.io/controller-runtime" + +type Result struct { + // Result contains the result of a Reconciler invocation. + ctrl.Result + + // Err contains an error of a Reconciler invocation + Err error + + // StopProcessing indicates that the caller should not continue processing and + // let the Reconciler go to sleep without an explicit requeue, expecting a + // Watch to trigger a future reconciliation call. 
+ StopProcessing bool +} + +func (r Result) ShouldReturn() bool { + return r.Err != nil || !r.Result.IsZero() || r.StopProcessing +} + +func (r Result) Get() (ctrl.Result, error) { + return r.Result, r.Err +} diff --git a/internal/controller/workloaddeployment_controller.go b/internal/controller/workloaddeployment_controller.go new file mode 100644 index 0000000..51f26d1 --- /dev/null +++ b/internal/controller/workloaddeployment_controller.go @@ -0,0 +1,1713 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "hash/fnv" + "path" + "strconv" + "strings" + "time" + + _ "embed" + + kcccomputev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" + kcciamv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/iam/v1beta1" + kcccomputev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + kccsecretmanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/secretmanager/v1beta1" + "github.com/go-logr/logr" + "google.golang.org/protobuf/proto" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/util/sets" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/finalizer" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/source" + + "go.datum.net/infra-provider-gcp/internal/controller/cloudinit" + "go.datum.net/infra-provider-gcp/internal/controller/k8sconfigconnector" + "go.datum.net/infra-provider-gcp/internal/crossclusterutil" + "go.datum.net/infra-provider-gcp/internal/locationutil" + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" + computev1alpha "go.datum.net/workload-operator/api/v1alpha" +) + +var imageMap = map[string]string{ + "datumcloud/ubuntu-2204-lts": "projects/ubuntu-os-cloud/global/images/ubuntu-2204-jammy-v20240927", + "datumcloud/cos-stable-117-18613-0-79": "projects/cos-cloud/global/images/cos-stable-117-18613-0-79", +} + +var machineTypeMap = map[string]string{ + "datumcloud/d1-standard-2": "n2-standard-2", +} + +const gcpInfraFinalizer = "compute.datumapis.com/infra-provider-gcp-deployment-controller" +const deploymentUIDLabel = "compute.datumapis.com/deployment-uid" + +//go:embed cloudinit/populate_secrets.py +var populateSecretsScript string + +// WorkloadDeploymentReconciler reconciles a WorkloadDeployment object +type WorkloadDeploymentReconciler struct { + client.Client + InfraClient client.Client + Scheme *runtime.Scheme + LocationClassName string + InfraClusterNamespaceName string + + finalizers finalizer.Finalizers +} + +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=workloaddeployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=workloaddeployments/status,verbs=get;update;patch +// 
+kubebuilder:rbac:groups=compute.datumapis.com,resources=workloaddeployments/finalizers,verbs=update + +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computefirewalls,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computefirewalls/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeinstancegroupmanagers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeinstancegroupmanagers/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeinstancetemplates,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeinstancetemplates/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computesubnetworks,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computesubnetworks/status,verbs=get + +// +kubebuilder:rbac:groups=iam.cnrm.cloud.google.com,resources=iampolicies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=iam.cnrm.cloud.google.com,resources=iampolicies/status,verbs=get +// +kubebuilder:rbac:groups=iam.cnrm.cloud.google.com,resources=iamserviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=iam.cnrm.cloud.google.com,resources=iamserviceaccounts/status,verbs=get + +// +kubebuilder:rbac:groups=secretmanager.cnrm.cloud.google.com,resources=secretmanagersecrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=secretmanager.cnrm.cloud.google.com,resources=secretmanagersecrets/status,verbs=get +// +kubebuilder:rbac:groups=secretmanager.cnrm.cloud.google.com,resources=secretmanagersecretversions,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=secretmanager.cnrm.cloud.google.com,resources=secretmanagersecretversions/status,verbs=get + +func (r *WorkloadDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + var deployment computev1alpha.WorkloadDeployment + if err := r.Client.Get(ctx, req.NamespacedName, &deployment); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Don't do anything if a location isn't set + if deployment.Status.Location == nil { + return ctrl.Result{}, nil + } + + _, shouldProcess, err := locationutil.GetLocation(ctx, r.Client, *deployment.Status.Location, r.LocationClassName) + if err != nil { + return ctrl.Result{}, err + } else if !shouldProcess { + return ctrl.Result{}, nil + } + + logger.Info("reconciling deployment") + defer logger.Info("reconcile complete") + + finalizationResult, err := r.finalizers.Finalize(ctx, &deployment) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to finalize: %w", err) + } + if finalizationResult.Updated { + if err = r.Client.Update(ctx, &deployment); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update based on finalization result: %w", err) + } + return ctrl.Result{}, nil + } + + if !deployment.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + // TODO(jreese) for both this reconciler and the gateway one, handle updates + // appropriately. 
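+ // A deployment is handled in one of two ways depending on its declared + // runtime: sandbox runtimes are rendered into a static pod manifest that is + // delivered to a container-optimized OS image via cloud-init, while virtual + // machine runtimes are mapped directly onto GCE instances.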
+ + runtime := deployment.Spec.Template.Spec.Runtime + if runtime.Sandbox != nil { + return r.reconcileSandboxRuntimeDeployment(ctx, logger, &deployment) + } else if runtime.VirtualMachine != nil { + return r.reconcileVMRuntimeDeployment(ctx, logger, &deployment) + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *WorkloadDeploymentReconciler) SetupWithManager(mgr ctrl.Manager, infraCluster cluster.Cluster) error { + r.finalizers = finalizer.NewFinalizers() + if err := r.finalizers.Register(gcpInfraFinalizer, r); err != nil { + return fmt.Errorf("failed to register finalizer: %w", err) + } + + // Watch the unstructured form of an instance group manager, as the generated + // types are not aligned with the actual CRD. + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + + return ctrl.NewControllerManagedBy(mgr). + For(&computev1alpha.WorkloadDeployment{}). + Owns(&networkingv1alpha.NetworkBinding{}). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeFirewall{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeFirewall](mgr.GetScheme(), &computev1alpha.WorkloadDeployment{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeInstanceTemplate{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeInstanceTemplate](mgr.GetScheme(), &computev1alpha.WorkloadDeployment{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcciamv1beta1.IAMServiceAccount{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcciamv1beta1.IAMServiceAccount](mgr.GetScheme(), &computev1alpha.WorkloadDeployment{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &instanceGroupManager, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*unstructured.Unstructured](mgr.GetScheme(), &computev1alpha.WorkloadDeployment{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kccsecretmanagerv1beta1.SecretManagerSecret{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kccsecretmanagerv1beta1.SecretManagerSecret](mgr.GetScheme(), &computev1alpha.WorkloadDeployment{}), + )). + Complete(r) +} + +func (r *WorkloadDeploymentReconciler) reconcileDeployment( + ctx context.Context, + logger logr.Logger, + deployment *computev1alpha.WorkloadDeployment, + cloudConfig *cloudinit.CloudConfig, + instanceMetadata []kcccomputev1beta1.InstancetemplateMetadata, +) (res ctrl.Result, err error) { + + var location networkingv1alpha.Location + locationObjectKey := client.ObjectKey{ + Namespace: deployment.Status.Location.Namespace, + Name: deployment.Status.Location.Name, + } + if err := r.Client.Get(ctx, locationObjectKey, &location); err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching location: %w", err) + } + + if location.Spec.Provider.GCP == nil { + return ctrl.Result{}, fmt.Errorf("attached location is not for the GCP provider") + } + + gcpProject := location.Spec.Provider.GCP.ProjectID + gcpRegion := location.Spec.Provider.GCP.Region + gcpZone := location.Spec.Provider.GCP.Zone + + cloudConfig.Hostname = fmt.Sprintf(`{%% set parts = v1.local_hostname.split('-') %%} +%s-{{ parts[-1] }}`, deployment.Name) + cloudConfig.PreserveHostname = proto.Bool(false) + // COS doesn't run the hostname module, this happens to work... 
Need to use our + // own image. + cloudConfig.RunCmd = append(cloudConfig.RunCmd, "cloud-init single --name cc_set_hostname") + + availableCondition := metav1.Condition{ + Type: computev1alpha.WorkloadDeploymentAvailable, + Status: metav1.ConditionFalse, + Reason: "DeploymentResourcesNotReady", + ObservedGeneration: deployment.Generation, + Message: "Deployment resources are not ready", + } + + defer func() { + if err != nil { + // Don't update the status if errors are encountered + return + } + statusChanged := apimeta.SetStatusCondition(&deployment.Status.Conditions, availableCondition) + + if statusChanged { + err = r.Client.Status().Update(ctx, deployment) + } + }() + + if err := r.reconcileNetworkInterfaceNetworkPolicies(ctx, logger, gcpProject, r.InfraClusterNamespaceName, deployment); err != nil { + return ctrl.Result{}, fmt.Errorf("failed reconciling network interface network policies: %w", err) + } + + // Service account names cannot exceed 30 characters + // TODO(jreese) move to base36, as the underlying bytes won't be lost + h := fnv.New32a() + h.Write([]byte(deployment.Spec.WorkloadRef.UID)) + + var serviceAccount kcciamv1beta1.IAMServiceAccount + serviceAccountObjectKey := client.ObjectKey{ + Namespace: r.InfraClusterNamespaceName, + Name: fmt.Sprintf("workload-%d", h.Sum32()), + } + if err := r.InfraClient.Get(ctx, serviceAccountObjectKey, &serviceAccount); client.IgnoreNotFound(err) != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching deployment's service account: %w", err) + } + + if serviceAccount.CreationTimestamp.IsZero() { + serviceAccount = kcciamv1beta1.IAMServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: serviceAccountObjectKey.Namespace, + Name: serviceAccountObjectKey.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + }, + Spec: kcciamv1beta1.IAMServiceAccountSpec{ + Description: proto.String(fmt.Sprintf("service account for workload %s", deployment.Spec.WorkloadRef.UID)), + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, &serviceAccount, r.Scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller on service account: %w", err) + } + + if err := r.InfraClient.Create(ctx, &serviceAccount); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create deployment's service account: %w", err) + } + } + + if !k8sconfigconnector.IsStatusConditionTrue(serviceAccount.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("service account not ready yet") + availableCondition.Reason = "ServiceAccountNotReady" + return ctrl.Result{}, nil + } + + // TODO(jreese) add IAM Policy to the GCP service account to allow the service + // account used by k8s-config-connector the `roles/iam.serviceAccountUser` role, + // so that it can create instances with the service account without needing a + // project level role binding. Probably just pass in the service account + // email, otherwise we'd have to do some kind of discovery.
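+ // The remaining steps assemble the deployment's resources in order: config + // maps and secrets are folded into the cloud-init payload, an instance + // template is rendered from the deployment spec, and an instance group + // manager is created to run instances from that template.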
+ + if err := r.reconcileConfigMaps(ctx, cloudConfig, deployment); err != nil { + return ctrl.Result{}, fmt.Errorf("failed reconciling configmaps: %w", err) + } + + proceed, err := r.reconcileSecrets(ctx, logger, gcpProject, r.InfraClusterNamespaceName, &availableCondition, cloudConfig, deployment, serviceAccount) + if !proceed || err != nil { + return ctrl.Result{}, err + } + + result, instanceTemplate, oldInstanceTemplate, err := r.reconcileInstanceTemplate(ctx, logger, gcpProject, gcpRegion, r.InfraClusterNamespaceName, &availableCondition, deployment, cloudConfig, instanceMetadata, &serviceAccount) + if !result.IsZero() || err != nil { + return result, err + } + + if !k8sconfigconnector.IsStatusConditionTrue(instanceTemplate.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("instance template not ready yet") + availableCondition.Reason = "InstanceTemplateNotReady" + return ctrl.Result{}, nil + } + + instanceGroupManager, err := r.reconcileInstanceGroupManager(ctx, logger, gcpProject, gcpZone, r.InfraClusterNamespaceName, &availableCondition, deployment, instanceTemplate) + if err != nil { + return ctrl.Result{}, err + } + + proceed, err = r.checkInstanceGroupManagerReadiness(logger, &availableCondition, instanceGroupManager) + if !proceed || err != nil { + return ctrl.Result{}, err + } + + result, err = r.updateDeploymentStatus(ctx, logger, &availableCondition, deployment, instanceGroupManager) + if !result.IsZero() || err != nil { + return result, err + } + + if !oldInstanceTemplate.CreationTimestamp.IsZero() { + logger.Info("deleting old instance template") + if err := r.InfraClient.Delete(ctx, oldInstanceTemplate); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to delete instance template: %w", err) + } + + logger.Info("old instance template deleted") + } + + return ctrl.Result{}, nil +} + +func (r *WorkloadDeploymentReconciler) reconcileSandboxRuntimeDeployment( + ctx context.Context, + logger logr.Logger, + deployment *computev1alpha.WorkloadDeployment, +) (ctrl.Result, error) { + logger.Info("processing sandbox based workload") + + runtimeSpec := deployment.Spec.Template.Spec.Runtime + + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "instance", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + }, + } + + volumeMap := map[string]computev1alpha.VolumeSource{} + for _, v := range deployment.Spec.Template.Spec.Volumes { + volumeMap[v.Name] = v.VolumeSource + } + + for _, c := range runtimeSpec.Sandbox.Containers { + // TODO(jreese) handle env vars that use `valueFrom` + container := corev1.Container{ + Name: c.Name, + Image: c.Image, + Env: c.Env, + } + + for _, attachment := range c.VolumeAttachments { + if attachment.MountPath != nil { + volume := volumeMap[attachment.Name] + + if volume.Disk != nil { + populator := volume.Disk.Template.Spec.Populator + if populator == nil || populator.Filesystem == nil { + return ctrl.Result{}, fmt.Errorf("cannot mount volume with unknown filesystem") + } + + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: fmt.Sprintf("disk-%s", attachment.Name), + MountPath: *attachment.MountPath, + }) + } + + if volume.ConfigMap != nil { + // Cloud-init will place files at /etc/configmaps/<name>/<key> + + if len(volume.ConfigMap.Items) > 0 { + // TODO(jreese) implement this + logger.Info("attaching specific configmap items is not currently supported") + return ctrl.Result{}, nil + } + + 
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: fmt.Sprintf("configmap-%s", volume.ConfigMap.Name), + MountPath: *attachment.MountPath, + }) + } + + if volume.Secret != nil { + if len(volume.Secret.Items) > 0 { + // TODO(jreese) implement this + logger.Info("attaching specific secret items is not currently supported") + return ctrl.Result{}, nil + } + + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: fmt.Sprintf("secret-%s", volume.Secret.SecretName), + MountPath: *attachment.MountPath, + }) + } + } + } + + pod.Spec.Containers = append(pod.Spec.Containers, container) + } + + cloudConfig := &cloudinit.CloudConfig{} + cloudConfig.RunCmd = []string{ + // Rely on network policies + "iptables -I INPUT 1 -j ACCEPT", + "systemctl enable kubelet --now", + } + + hostPathType := corev1.HostPathDirectory + + // Add pod volumes + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.Disk != nil { + populator := volume.Disk.Template.Spec.Populator + if populator == nil || populator.Filesystem == nil { + continue + } + + pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ + Name: fmt.Sprintf("disk-%s", volume.Name), + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: fmt.Sprintf("/mnt/disk-%s", volume.Name), + Type: &hostPathType, + }, + }, + }) + } + + if volume.ConfigMap != nil { + pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ + Name: fmt.Sprintf("configmap-%s", volume.ConfigMap.Name), + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: fmt.Sprintf("/etc/configmaps/%s", volume.ConfigMap.Name), + Type: &hostPathType, + }, + }, + }) + } + + if volume.Secret != nil { + // This content is populated by the populate_secrets.py script. + pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ + Name: fmt.Sprintf("secret-%s", volume.Secret.SecretName), + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: fmt.Sprintf("/etc/secrets/content/%s", volume.Secret.SecretName), + Type: &hostPathType, + }, + }, + }) + } + } + + serializer := k8sjson.NewSerializerWithOptions( + k8sjson.DefaultMetaFactory, + r.Scheme, + r.Scheme, + k8sjson.SerializerOptions{Yaml: true, Pretty: true}, + ) + + podSpecBytes, err := k8sruntime.Encode(serializer, pod) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to marshal pod spec: %w", err) + } + + cloudConfig.WriteFiles = append(cloudConfig.WriteFiles, cloudinit.WriteFile{ + Encoding: "b64", + Content: base64.StdEncoding.EncodeToString(podSpecBytes), + Owner: "root:root", + Path: "/etc/kubernetes/manifests/instance.yaml", + Permissions: "0644", + }) + + // Inject a boot volume + deployment = deployment.DeepCopy() + deployment.Spec.Template.Spec.Volumes = append([]computev1alpha.InstanceVolume{ + { + Name: "datum-boot", + VolumeSource: computev1alpha.VolumeSource{ + Disk: &computev1alpha.DiskTemplateVolumeSource{ + Template: &computev1alpha.DiskTemplateVolumeSourceTemplate{ + Spec: computev1alpha.DiskSpec{ + Type: "pd-standard", + Populator: &computev1alpha.DiskPopulator{ + Image: &computev1alpha.ImageDiskPopulator{ + Name: "datumcloud/cos-stable-117-18613-0-79", + }, + }, + }, + }, + }, + }, + }, + }, deployment.Spec.Template.Spec.Volumes...)
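+ // The injected boot volume is populated from an image, so + // buildInstanceTemplateVolumes will mark it as the boot disk when the + // instance template is assembled.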
+ + return r.reconcileDeployment( + ctx, + logger, + deployment, + cloudConfig, + nil, + ) +} + +func (r *WorkloadDeploymentReconciler) reconcileVMRuntimeDeployment( + ctx context.Context, + logger logr.Logger, + deployment *computev1alpha.WorkloadDeployment, +) (ctrl.Result, error) { + + logger.Info("processing VM based workload") + + runtimeSpec := deployment.Spec.Template.Spec.Runtime + + instanceMetadata := []kcccomputev1beta1.InstancetemplateMetadata{ + { + Key: "ssh-keys", + Value: deployment.Spec.Template.Annotations[computev1alpha.SSHKeysAnnotation], + }, + } + + volumeMap := map[string]computev1alpha.VolumeSource{} + for _, v := range deployment.Spec.Template.Spec.Volumes { + volumeMap[v.Name] = v.VolumeSource + } + + cloudConfig := &cloudinit.CloudConfig{} + + mountParentDirs := sets.Set[string]{} + for _, attachment := range runtimeSpec.VirtualMachine.VolumeAttachments { + if attachment.MountPath != nil { + volume := volumeMap[attachment.Name] + + // Disk backed volumes are currently handled inside `buildInstanceTemplateVolumes` + + if volume.ConfigMap != nil { + // Cloud-init will place files at /etc/configmaps/<name>/<key> + + if len(volume.ConfigMap.Items) > 0 { + // TODO(jreese) implement this + logger.Info("attaching specific configmap items is not currently supported") + return ctrl.Result{}, nil + } + + mountParentDirs.Insert(fmt.Sprintf("mkdir -p %s", path.Dir(*attachment.MountPath))) + + cloudConfig.RunCmd = append( + cloudConfig.RunCmd, + fmt.Sprintf("ln -s /etc/configmaps/%s %s", volume.ConfigMap.Name, *attachment.MountPath), + ) + } + + if volume.Secret != nil { + if len(volume.Secret.Items) > 0 { + // TODO(jreese) implement this + logger.Info("attaching specific secret items is not currently supported") + return ctrl.Result{}, nil + } + + mountParentDirs.Insert(fmt.Sprintf("mkdir -p %s", path.Dir(*attachment.MountPath))) + + cloudConfig.RunCmd = append( + cloudConfig.RunCmd, + fmt.Sprintf("ln -s /etc/secrets/content/%s %s", volume.Secret.SecretName, *attachment.MountPath), + ) + } + } + } + + cloudConfig.RunCmd = append(mountParentDirs.UnsortedList(), cloudConfig.RunCmd...)
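+ // Parent directories must exist before the symlink commands above run, so + // the mkdir commands are placed at the front of the command list.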
+ + return r.reconcileDeployment( + ctx, + logger, + deployment, + cloudConfig, + instanceMetadata, + ) +} + +func (r *WorkloadDeploymentReconciler) reconcileNetworkInterfaceNetworkPolicies( + ctx context.Context, + logger logr.Logger, + gcpProject string, + infraClusterNamespaceName string, + deployment *computev1alpha.WorkloadDeployment, +) error { + for interfaceIndex, networkInterface := range deployment.Spec.Template.Spec.NetworkInterfaces { + interfacePolicy := networkInterface.NetworkPolicy + if interfacePolicy == nil { + continue + } + + var networkBinding networkingv1alpha.NetworkBinding + networkBindingObjectKey := client.ObjectKey{ + Namespace: deployment.Namespace, + Name: fmt.Sprintf("%s-net-%d", deployment.Name, interfaceIndex), + } + + if err := r.Client.Get(ctx, networkBindingObjectKey, &networkBinding); err != nil { + return fmt.Errorf("failed fetching network binding for interface: %w", err) + } + + if networkBinding.Status.NetworkContextRef == nil { + return fmt.Errorf("network binding not associated with network context") + } + + var networkContext networkingv1alpha.NetworkContext + networkContextObjectKey := client.ObjectKey{ + Namespace: networkBinding.Status.NetworkContextRef.Namespace, + Name: networkBinding.Status.NetworkContextRef.Name, + } + if err := r.Client.Get(ctx, networkContextObjectKey, &networkContext); err != nil { + return fmt.Errorf("failed fetching network context: %w", err) + } + + // TODO(jreese) change this to where a higher level datum controller makes a + // network policy in the network service as a result of reacting to a + // workload being created that has an interface policy + + for ruleIndex, ingressRule := range interfacePolicy.Ingress { + + firewallName := fmt.Sprintf("deployment-%s-net-%d-%d", deployment.UID, interfaceIndex, ruleIndex) + + var firewall kcccomputev1beta1.ComputeFirewall + firewallObjectKey := client.ObjectKey{ + Namespace: infraClusterNamespaceName, + // TODO(jreese) create name that is going to be unique across all source + // namespaces within the target GCP project. 
+ Name: firewallName, + } + + if err := r.InfraClient.Get(ctx, firewallObjectKey, &firewall); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("failed to read firewall from k8s API: %w", err) + } + + if firewall.CreationTimestamp.IsZero() { + logger.Info("creating firewall for interface policy rule") + firewall = kcccomputev1beta1.ComputeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: firewallObjectKey.Namespace, + Name: firewallObjectKey.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + }, + Spec: kcccomputev1beta1.ComputeFirewallSpec{ + Description: proto.String(fmt.Sprintf( + "instance interface policy for %s: interfaceIndex:%d, ruleIndex:%d", + deployment.Name, + interfaceIndex, + ruleIndex, + )), + Direction: proto.String("INGRESS"), + NetworkRef: kcccomputev1alpha1.ResourceRef{ + Namespace: infraClusterNamespaceName, + Name: fmt.Sprintf("network-%s", networkContext.UID), + }, + Priority: proto.Int64(65534), + TargetTags: []string{ + fmt.Sprintf("deployment-%s", deployment.UID), + }, + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, &firewall, r.Scheme); err != nil { + return fmt.Errorf("failed to set controller on firewall: %w", err) + } + + for _, port := range ingressRule.Ports { + ipProtocol := "tcp" + if port.Protocol != nil { + ipProtocol = strings.ToLower(string(*port.Protocol)) + } + + var gcpPorts []string + if port.Port != nil { + var gcpPort string + + gcpPort = strconv.Itoa(port.Port.IntValue()) + if gcpPort == "0" { + // TODO(jreese) look up named port + logger.Info("named port lookup not implemented") + return nil + } + + if port.EndPort != nil { + gcpPort = fmt.Sprintf("%s-%d", gcpPort, *port.EndPort) + } + + gcpPorts = append(gcpPorts, gcpPort) + } + + firewall.Spec.Allow = append(firewall.Spec.Allow, kcccomputev1beta1.FirewallAllow{ + Protocol: ipProtocol, + Ports: gcpPorts, + }) + } + + for _, peer := range ingressRule.From { + if peer.IPBlock != nil { + firewall.Spec.SourceRanges = append(firewall.Spec.SourceRanges, peer.IPBlock.CIDR) + // TODO(jreese) implement IPBlock.Except as a separate rule of one higher priority + } + } + + if err := r.InfraClient.Create(ctx, &firewall); err != nil { + return fmt.Errorf("failed to create firewall: %w", err) + } + } + } + } + return nil +} + +func (r *WorkloadDeploymentReconciler) reconcileConfigMaps( + ctx context.Context, + cloudConfig *cloudinit.CloudConfig, + deployment *computev1alpha.WorkloadDeployment, +) error { + var objectKeys []client.ObjectKey + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.ConfigMap != nil { + objectKeys = append(objectKeys, client.ObjectKey{ + Namespace: deployment.Namespace, + Name: volume.ConfigMap.Name, + }) + } + } + + if len(objectKeys) == 0 { + return nil + } + + for _, configMapObjectKey := range objectKeys { + var configMap corev1.ConfigMap + if err := r.Client.Get(ctx, configMapObjectKey, &configMap); err != nil { + return fmt.Errorf("failed to get configmap: %w", err) + } + + for k, v := range configMap.Data { + cloudConfig.WriteFiles = append(cloudConfig.WriteFiles, cloudinit.WriteFile{ + Encoding: "b64", + Content: base64.StdEncoding.EncodeToString([]byte(v)), + Owner: "root:root", + Path: fmt.Sprintf("/etc/configmaps/%s/%s", configMap.Name, k), + Permissions: "0644", + }) + } + } + + return nil +} + +func (r *WorkloadDeploymentReconciler) reconcileSecrets( + ctx context.Context, + logger logr.Logger, + // TODO(jreese) consider a reconcile context that can be 
passed around? + gcpProject string, + infraClusterNamespaceName string, + availableCondition *metav1.Condition, + cloudConfig *cloudinit.CloudConfig, + deployment *computev1alpha.WorkloadDeployment, + serviceAccount kcciamv1beta1.IAMServiceAccount, +) (bool, error) { + var objectKeys []client.ObjectKey + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.Secret != nil { + objectKeys = append(objectKeys, client.ObjectKey{ + Namespace: deployment.Namespace, + Name: volume.Secret.SecretName, + }) + } + } + + if len(objectKeys) == 0 { + return true, nil + } + + var secret kccsecretmanagerv1beta1.SecretManagerSecret + + // Aggregate secret data into one value by creating a map of secret names + // to content. This will allow for mounting of keys into volumes or secrets + // as expected. + secretData := map[string]map[string][]byte{} + for _, objectKey := range objectKeys { + var k8ssecret corev1.Secret + if err := r.Client.Get(ctx, objectKey, &k8ssecret); err != nil { + return false, fmt.Errorf("failed fetching secret: %w", err) + } + + secretData[k8ssecret.Name] = k8ssecret.Data + } + + secretBytes, err := json.Marshal(secretData) + if err != nil { + return false, fmt.Errorf("failed to marshal secret data: %w", err) + } + + aggregatedK8sSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: infraClusterNamespaceName, + Name: fmt.Sprintf("deployment-%s", deployment.UID), + }, + } + _, err = controllerutil.CreateOrUpdate(ctx, r.InfraClient, aggregatedK8sSecret, func() error { + if aggregatedK8sSecret.CreationTimestamp.IsZero() { + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, aggregatedK8sSecret, r.Scheme); err != nil { + return fmt.Errorf("failed to set controller on aggregated deployment secret: %w", err) + } + } + + aggregatedK8sSecret.Data = map[string][]byte{ + "secretData": secretBytes, + } + + return nil + }) + + if err != nil { + return false, fmt.Errorf("failed to reconcile aggregated k8s secret: %w", err) + } + + // Create a secret in the secret manager service, grant access to the service + // account specific to the deployment. 
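+ // The aggregated data is stored as a secret version below and fetched at + // boot by populate_secrets.py using the deployment's service account, which + // writes each secret's keys under /etc/secrets/content/.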
+ + secretObjectKey := client.ObjectKey{ + Namespace: infraClusterNamespaceName, + Name: fmt.Sprintf("deployment-%s", deployment.UID), + } + if err := r.InfraClient.Get(ctx, secretObjectKey, &secret); client.IgnoreNotFound(err) != nil { + return false, fmt.Errorf("failed fetching deployment secret: %w", err) + } + + if secret.CreationTimestamp.IsZero() { + secret = kccsecretmanagerv1beta1.SecretManagerSecret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: secretObjectKey.Namespace, + Name: secretObjectKey.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + }, + Spec: kccsecretmanagerv1beta1.SecretManagerSecretSpec{ + Replication: &kccsecretmanagerv1beta1.SecretReplication{ + Automatic: proto.Bool(true), + }, + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, &secret, r.Scheme); err != nil { + return false, fmt.Errorf("failed to set controller on deployment secret manager secret: %w", err) + } + + if err := r.InfraClient.Create(ctx, &secret); err != nil { + return false, fmt.Errorf("failed to create deployment secret: %w", err) + } + } + + if !k8sconfigconnector.IsStatusConditionTrue(secret.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("secret not ready yet") + availableCondition.Reason = "SecretNotReady" + return false, nil + } + + var secretIAMPolicy kcciamv1beta1.IAMPolicy + if err := r.InfraClient.Get(ctx, client.ObjectKeyFromObject(&secret), &secretIAMPolicy); client.IgnoreNotFound(err) != nil { + return false, fmt.Errorf("failed fetching secret's IAM policy: %w", err) + } + + if secretIAMPolicy.CreationTimestamp.IsZero() { + secretIAMPolicy = kcciamv1beta1.IAMPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: secret.Namespace, + Name: secret.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + }, + Spec: kcciamv1beta1.IAMPolicySpec{ + ResourceRef: kcccomputev1alpha1.IAMResourceRef{ + Kind: "SecretManagerSecret", + Namespace: secret.Namespace, + Name: secret.Name, + }, + Bindings: []kcciamv1beta1.PolicyBindings{ + { + Role: "roles/secretmanager.secretAccessor", + Members: []string{ + *serviceAccount.Status.Member, + }, + }, + }, + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, &secretIAMPolicy, r.Scheme); err != nil { + return false, fmt.Errorf("failed to set controller on deployment secret IAM policy: %w", err) + } + + if err := r.InfraClient.Create(ctx, &secretIAMPolicy); err != nil { + return false, fmt.Errorf("failed setting IAM policy on secret: %w", err) + } + } + + // Store secret information in the secret version + // TODO(jreese) handle updates to secrets - use Generation from aggregated + // secret manifest? 
+ var secretVersion kccsecretmanagerv1beta1.SecretManagerSecretVersion + if err := r.InfraClient.Get(ctx, client.ObjectKeyFromObject(&secret), &secretVersion); client.IgnoreNotFound(err) != nil { + return false, fmt.Errorf("failed fetching secret manager version: %w", err) + } + + if secretVersion.CreationTimestamp.IsZero() { + // TODO(jreese) create new versions on updates + secretVersion = kccsecretmanagerv1beta1.SecretManagerSecretVersion{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: secret.Namespace, + Name: secret.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + }, + Spec: kccsecretmanagerv1beta1.SecretManagerSecretVersionSpec{ + Enabled: proto.Bool(true), + SecretData: kccsecretmanagerv1beta1.SecretversionSecretData{ + ValueFrom: &kccsecretmanagerv1beta1.SecretversionValueFrom{ + SecretKeyRef: &kcccomputev1alpha1.SecretKeyRef{ + Key: "secretData", + Name: aggregatedK8sSecret.Name, + }, + }, + }, + SecretRef: kcccomputev1alpha1.ResourceRef{ + Namespace: secret.Namespace, + Name: secret.Name, + }, + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, &secretVersion, r.Scheme); err != nil { + return false, fmt.Errorf("failed to set controller on secret version: %w", err) + } + + if err := r.InfraClient.Create(ctx, &secretVersion); err != nil { + return false, fmt.Errorf("failed to create secret version: %w", err) + } + } + + cloudConfig.WriteFiles = append(cloudConfig.WriteFiles, cloudinit.WriteFile{ + Encoding: "b64", + Content: base64.StdEncoding.EncodeToString([]byte(populateSecretsScript)), + Owner: "root:root", + Path: "/etc/secrets/populate_secrets.py", + Permissions: "0755", + }) + + cloudConfig.RunCmd = append( + cloudConfig.RunCmd, + fmt.Sprintf("/etc/secrets/populate_secrets.py https://secretmanager.googleapis.com/v1/%s/versions/latest:access", *secret.Status.Name), + ) + + return true, nil +} + +func (r *WorkloadDeploymentReconciler) reconcileInstanceTemplate( + ctx context.Context, + logger logr.Logger, + // TODO(jreese) consider a reconcile context that can be passed around? 
+ gcpProject string, + gcpRegion string, + infraClusterNamespaceName string, + availableCondition *metav1.Condition, + deployment *computev1alpha.WorkloadDeployment, + cloudConfig *cloudinit.CloudConfig, + instanceMetadata []kcccomputev1beta1.InstancetemplateMetadata, + serviceAccount *kcciamv1beta1.IAMServiceAccount, +) (ctrl.Result, *kcccomputev1beta1.ComputeInstanceTemplate, *kcccomputev1beta1.ComputeInstanceTemplate, error) { + + var instanceTemplate kcccomputev1beta1.ComputeInstanceTemplate + var oldInstanceTemplate kcccomputev1beta1.ComputeInstanceTemplate + + var instanceTemplates kcccomputev1beta1.ComputeInstanceTemplateList + if err := r.InfraClient.List( + ctx, + &instanceTemplates, + []client.ListOption{ + client.InNamespace(infraClusterNamespaceName), + client.MatchingLabels{ + deploymentUIDLabel: string(deployment.UID), + }, + }..., + ); err != nil { + return ctrl.Result{}, nil, nil, fmt.Errorf("unable to list instance templates: %w", err) + } + + instanceTemplateName := fmt.Sprintf("deployment-%s-gen%d", deployment.UID, deployment.Generation) + if len(instanceTemplates.Items) > 0 { + for _, t := range instanceTemplates.Items { + if t.Name == instanceTemplateName { + instanceTemplate = t + } else { + oldInstanceTemplate = t + } + } + } + + runtimeSpec := deployment.Spec.Template.Spec.Runtime + + if instanceTemplate.CreationTimestamp.IsZero() { + availableCondition.Reason = "InstanceTemplateDoesNotExist" + logger.Info("instance template does not exist") + machineType, ok := machineTypeMap[runtimeSpec.Resources.InstanceType] + if !ok { + return ctrl.Result{}, nil, nil, fmt.Errorf("unable to map datum instance type: %s", runtimeSpec.Resources.InstanceType) + } + + userData, err := cloudConfig.Generate() + if err != nil { + return ctrl.Result{}, nil, nil, fmt.Errorf("failed generating cloud init user data: %w", err) + } + + instanceMetadata = append(instanceMetadata, kcccomputev1beta1.InstancetemplateMetadata{ + Key: "user-data", + Value: fmt.Sprintf("## template: jinja\n#cloud-config\n\n%s", string(userData)), + }) + + instanceTemplate = kcccomputev1beta1.ComputeInstanceTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: infraClusterNamespaceName, + Name: instanceTemplateName, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + Labels: map[string]string{ + deploymentUIDLabel: string(deployment.UID), + }, + }, + Spec: kcccomputev1beta1.ComputeInstanceTemplateSpec{ + MachineType: machineType, + CanIpForward: proto.Bool(true), + Metadata: instanceMetadata, + ServiceAccount: &kcccomputev1beta1.InstancetemplateServiceAccount{ + Scopes: []string{"cloud-platform"}, + ServiceAccountRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: serviceAccount.Namespace, + Name: serviceAccount.Name, + }, + }, + Tags: []string{ + fmt.Sprintf("workload-%s", deployment.Spec.WorkloadRef.UID), + fmt.Sprintf("deployment-%s", deployment.UID), + }, + }, + } + + if err := r.buildInstanceTemplateVolumes(logger, cloudConfig, deployment, &instanceTemplate); err != nil { + return ctrl.Result{}, nil, nil, fmt.Errorf("failed to build instance template volumes: %w", err) + } + + result, err := r.buildInstanceTemplateNetworkInterfaces(ctx, logger, gcpProject, gcpRegion, infraClusterNamespaceName, availableCondition, deployment, &instanceTemplate) + if err != nil { + return ctrl.Result{}, nil, nil, fmt.Errorf("failed to build instance template network interfaces: %w", err) + } else if !result.IsZero() { + logger.Info("network environment is not ready to attach") + return result, nil, nil, 
nil + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, &instanceTemplate, r.Scheme); err != nil { + return ctrl.Result{}, nil, nil, fmt.Errorf("failed to set controller on instance template: %w", err) + } + + logger.Info("creating instance template for workload") + if err := r.InfraClient.Create(ctx, &instanceTemplate); err != nil { + return ctrl.Result{}, nil, nil, fmt.Errorf("failed to create instance template: %w", err) + } + + return ctrl.Result{}, nil, nil, nil + } + return ctrl.Result{}, &instanceTemplate, &oldInstanceTemplate, nil +} + +func (r *WorkloadDeploymentReconciler) buildInstanceTemplateVolumes( + logger logr.Logger, + cloudConfig *cloudinit.CloudConfig, + deployment *computev1alpha.WorkloadDeployment, + instanceTemplate *kcccomputev1beta1.ComputeInstanceTemplate, +) error { + for volumeIndex, volume := range deployment.Spec.Template.Spec.Volumes { + disk := kcccomputev1beta1.InstancetemplateDisk{ + AutoDelete: proto.Bool(true), + Labels: map[string]string{ + "volume_name": volume.Name, + }, + } + + if volume.Disk != nil { + if volume.Disk.Template != nil { + diskTemplate := volume.Disk.Template + + // TODO(jreese) we'll need to have our images have different udev rules + // so that device names are enumerated at `/dev/disk/by-id/datumcloud-*` + // instead of `/dev/disk/by-id/google-*` + disk.DiskType = proto.String(diskTemplate.Spec.Type) + + if volume.Disk.DeviceName == nil { + disk.DeviceName = proto.String(fmt.Sprintf("volume-%d", volumeIndex)) + } else { + disk.DeviceName = proto.String(*volume.Disk.DeviceName) + } + + if populator := diskTemplate.Spec.Populator; populator != nil { + if populator.Image != nil { + // TODO(jreese) Should we only allow one volume to be populated by + // an image per instance? + disk.Boot = proto.Bool(true) + // Should be prevented by validation, but be safe + sourceImage, ok := imageMap[populator.Image.Name] + if !ok { + return fmt.Errorf("unable to map datum image name: %s", populator.Image.Name) + } + + disk.SourceImageRef = &kcccomputev1alpha1.ResourceRef{ + External: sourceImage, + } + } + + if populator.Filesystem != nil { + // Filesystem based populator, add cloud-init data to format the disk + // and make the volume available to mount into containers. 
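+ // fs_setup formats the device on first boot, and the mounts entries attach + // it under /mnt/disk-<name> for sandbox runtimes or at the declared mount + // path for VM runtimes.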
+ + // TODO(jreese) we'll need to have our images have different udev rules + // so that device names are enumerated at `/dev/disk/by-id/datumcloud-*` + // instead of `/dev/disk/by-id/google-*` + + devicePath := fmt.Sprintf("/dev/disk/by-id/google-%s", *disk.DeviceName) + + cloudConfig.FSSetup = append(cloudConfig.FSSetup, cloudinit.FSSetup{ + Label: fmt.Sprintf("disk-%s", volume.Name), + Filesystem: populator.Filesystem.Type, + Device: devicePath, + }) + + runtime := deployment.Spec.Template.Spec.Runtime + + if runtime.Sandbox != nil { + cloudConfig.Mounts = append(cloudConfig.Mounts, + fmt.Sprintf("[%s, %s]", devicePath, fmt.Sprintf("/mnt/disk-%s", volume.Name)), + ) + } + + if runtime.VirtualMachine != nil { + for _, attachment := range runtime.VirtualMachine.VolumeAttachments { + if attachment.Name != volume.Name { + continue + } + + if attachment.MountPath == nil { + logger.Info("unexpected VM attachment with no mount path for filesystem populated volume", "attachment_name", attachment.Name) + continue + } + + cloudConfig.Mounts = append(cloudConfig.Mounts, + fmt.Sprintf("[%s, %s]", devicePath, *attachment.MountPath), + ) + } + } + } + } + + if diskTemplate.Spec.Resources != nil { + if storage, ok := diskTemplate.Spec.Resources.Requests[corev1.ResourceStorage]; !ok { + return fmt.Errorf("unable to locate storage resource request for volume: %s", volume.Name) + } else { + disk.DiskSizeGb = proto.Int64(storage.Value() / (1024 * 1024 * 1024)) + } + } + + instanceTemplate.Spec.Disk = append(instanceTemplate.Spec.Disk, disk) + } + } + } + + return nil +} + +func (r *WorkloadDeploymentReconciler) buildInstanceTemplateNetworkInterfaces( + ctx context.Context, + logger logr.Logger, + // TODO(jreese) consider a reconcile context that can be passed around? 
+ gcpProject string, + gcpRegion string, + infraClusterNamespaceName string, + availableCondition *metav1.Condition, + deployment *computev1alpha.WorkloadDeployment, + instanceTemplate *kcccomputev1beta1.ComputeInstanceTemplate, +) (ctrl.Result, error) { + for interfaceIndex := range deployment.Spec.Template.Spec.NetworkInterfaces { + var networkBinding networkingv1alpha.NetworkBinding + networkBindingObjectKey := client.ObjectKey{ + Namespace: deployment.Namespace, + Name: fmt.Sprintf("%s-net-%d", deployment.Name, interfaceIndex), + } + + if err := r.Client.Get(ctx, networkBindingObjectKey, &networkBinding); err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching network binding for interface: %w", err) + } + + if networkBinding.Status.NetworkContextRef == nil { + return ctrl.Result{}, fmt.Errorf("network binding not associated with network context") + } + + var networkContext networkingv1alpha.NetworkContext + networkContextObjectKey := client.ObjectKey{ + Namespace: networkBinding.Status.NetworkContextRef.Namespace, + Name: networkBinding.Status.NetworkContextRef.Name, + } + if err := r.Client.Get(ctx, networkContextObjectKey, &networkContext); err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching network context: %w", err) + } + + // Get subnet that should be used for instances + // TODO(jreese) filter on subnet class + var subnetClaims networkingv1alpha.SubnetClaimList + listOpts := []client.ListOption{ + client.InNamespace(networkContext.Namespace), + client.MatchingLabels{ + "cloud.datum.net/network-context": networkContext.Name, + "gcp.topology.datum.net/region": gcpRegion, + "gcp.topology.datum.net/project": gcpProject, + }, + } + + if err := r.Client.List(ctx, &subnetClaims, listOpts...); err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching subnet claims: %w", err) + } + + if len(subnetClaims.Items) == 0 { + logger.Info("creating subnet claim") + // TODO(jreese) This is not the best long term location for subnet claims + // to be created. Need to review this. Note how we list subnet claims, but + // create one with a specific name. This won't work out in the current + // logic if another subnet is required. This really should be done + // elsewhere. Perhaps take a SchedulingGate approach, and have a separate + // controller deal with subnet needs for deployments in a location, and + // remove the gate when things are ready. + // + // Note that currently if the subnet claim or subnet is removed in the + // upstream control plane, the resources in the infra control plane will + // not be removed. This is because we don't have a dedicated controller + // for these concerns. 
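+ // Claims are looked up by network context, region, and project labels, so + // deployments that share a network context and region reuse the same + // claimed subnet.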
+ + subnetClaim := networkingv1alpha.SubnetClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: networkContext.Namespace, + Name: fmt.Sprintf("gcp-%s", gcpRegion), + Labels: map[string]string{ + "cloud.datum.net/network-context": networkContext.Name, + "gcp.topology.datum.net/region": gcpRegion, + "gcp.topology.datum.net/project": gcpProject, + }, + }, + Spec: networkingv1alpha.SubnetClaimSpec{ + SubnetClass: "private", + IPFamily: networkingv1alpha.IPv4Protocol, + NetworkContext: networkingv1alpha.LocalNetworkContextRef{ + Name: networkContext.Name, + }, + Location: *deployment.Status.Location, + }, + } + + if err := r.Client.Create(ctx, &subnetClaim); err != nil { + return ctrl.Result{}, fmt.Errorf("failed creating subnet claim: %w", err) + } + + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } + + subnetClaim := subnetClaims.Items[0] + + if !apimeta.IsStatusConditionTrue(subnetClaim.Status.Conditions, "Ready") { + availableCondition.Reason = "SubnetClaimNotReady" + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } + + var subnet networkingv1alpha.Subnet + subnetObjectKey := client.ObjectKey{ + Namespace: subnetClaim.Namespace, + Name: subnetClaim.Status.SubnetRef.Name, + } + if err := r.Client.Get(ctx, subnetObjectKey, &subnet); err != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching subnet: %w", err) + } + + if !apimeta.IsStatusConditionTrue(subnet.Status.Conditions, "Ready") { + availableCondition.Reason = "SubnetNotReady" + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } + + var kccSubnet kcccomputev1beta1.ComputeSubnetwork + kccSubnetObjectKey := client.ObjectKey{ + Namespace: infraClusterNamespaceName, + Name: fmt.Sprintf("subnet-%s", subnet.UID), + } + if err := r.InfraClient.Get(ctx, kccSubnetObjectKey, &kccSubnet); client.IgnoreNotFound(err) != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching GCP subnetwork: %w", err) + } + + if kccSubnet.CreationTimestamp.IsZero() { + kccSubnet = kcccomputev1beta1.ComputeSubnetwork{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kccSubnetObjectKey.Namespace, + Name: kccSubnetObjectKey.Name, + Annotations: map[string]string{ + GCPProjectAnnotation: gcpProject, + }, + }, + Spec: kcccomputev1beta1.ComputeSubnetworkSpec{ + IpCidrRange: fmt.Sprintf("%s/%d", *subnet.Status.StartAddress, *subnet.Status.PrefixLength), + NetworkRef: kcccomputev1alpha1.ResourceRef{ + Namespace: infraClusterNamespaceName, + Name: fmt.Sprintf("network-%s", networkContext.UID), + }, + Purpose: proto.String("PRIVATE"), + Region: gcpRegion, + // TODO(jreese) ipv6 + StackType: proto.String("IPV4_ONLY"), + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, &subnet, &kccSubnet, r.Scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller on GCP subnetwork: %w", err) + } + + if err := r.InfraClient.Create(ctx, &kccSubnet); err != nil { + return ctrl.Result{}, fmt.Errorf("failed creating GCP subnetwork: %w", err) + } + } + + if !k8sconfigconnector.IsStatusConditionTrue(kccSubnet.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + availableCondition.Reason = "SubnetNotReady" + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + + gcpInterface := kcccomputev1beta1.InstancetemplateNetworkInterface{ + NetworkRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: infraClusterNamespaceName, + Name: fmt.Sprintf("network-%s", networkContext.UID), + }, + AccessConfig: []kcccomputev1beta1.InstancetemplateAccessConfig{ + { + // TODO(jreese) only enable this if instructed 
by workload spec + // TODO(jreese) bleh: https://github.com/GoogleCloudPlatform/k8s-config-connector/issues/329 + + // ONE_TO_ONE_NAT is enabled by default. We'll need the above fixed + // if we want to be able to omit the NAT ip + + // NatIpRef: &kcccomputev1alpha1.ResourceRef{}, + }, + }, + SubnetworkRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: kccSubnet.Namespace, + Name: kccSubnet.Name, + }, + } + instanceTemplate.Spec.NetworkInterface = append(instanceTemplate.Spec.NetworkInterface, gcpInterface) + } + + return ctrl.Result{}, nil +} + +func (r *WorkloadDeploymentReconciler) reconcileInstanceGroupManager( + ctx context.Context, + logger logr.Logger, + // TODO(jreese) consider a reconcile context that can be passed around? + gcpProject string, + gcpZone string, + infraClusterNamespaceName string, + availableCondition *metav1.Condition, + deployment *computev1alpha.WorkloadDeployment, + instanceTemplate *kcccomputev1beta1.ComputeInstanceTemplate, +) (*unstructured.Unstructured, error) { + instanceGroupManagerName := fmt.Sprintf("deployment-%s", deployment.UID) + + // Unstructured is used here due to bugs in type generation. We'll likely + // completely move away from this to our own per-instance control though. + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + instanceGroupManagerObjectKey := client.ObjectKey{ + Namespace: infraClusterNamespaceName, + Name: instanceGroupManagerName, + } + if err := r.InfraClient.Get(ctx, instanceGroupManagerObjectKey, &instanceGroupManager); client.IgnoreNotFound(err) != nil { + return nil, fmt.Errorf("failed fetching instance group manager: %w", err) + } + + if t := instanceGroupManager.GetCreationTimestamp(); t.IsZero() { + availableCondition.Reason = "InstanceGroupManagerDoesNotExist" + var namedPorts []kcccomputev1beta1.InstancegroupmanagerNamedPorts + if sb := deployment.Spec.Template.Spec.Runtime.Sandbox; sb != nil { + for _, c := range sb.Containers { + for _, p := range c.Ports { + namedPorts = append(namedPorts, kcccomputev1beta1.InstancegroupmanagerNamedPorts{ + Name: proto.String(p.Name), + Port: proto.Int64(int64(p.Port)), + }) + } + } + } + + if vm := deployment.Spec.Template.Spec.Runtime.VirtualMachine; vm != nil { + for _, p := range vm.Ports { + namedPorts = append(namedPorts, kcccomputev1beta1.InstancegroupmanagerNamedPorts{ + Name: proto.String(p.Name), + Port: proto.Int64(int64(p.Port)), + }) + } + } + + instanceGroupManager := &kcccomputev1beta1.ComputeInstanceGroupManager{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: instanceGroupManagerObjectKey.Namespace, + Name: instanceGroupManagerObjectKey.Name, + }, + Spec: kcccomputev1beta1.ComputeInstanceGroupManagerSpec{ + ProjectRef: kcccomputev1alpha1.ResourceRef{ + External: gcpProject, + }, + Location: proto.String(gcpZone), + // TODO(jreese) this will also need to be unique across all source namespaces + // Likely need to use cloud-init to set the desired hostname on the system, + // and will need our own DNS discovery solution deployed in the target + // project to be used as the nameserver. 
+ BaseInstanceName: proto.String(fmt.Sprintf("deployment-%s-#", deployment.UID)), + InstanceTemplateRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: instanceTemplate.Namespace, + Name: instanceTemplate.Name, + }, + + NamedPorts: namedPorts, + UpdatePolicy: &kcccomputev1beta1.InstancegroupmanagerUpdatePolicy{ + Type: proto.String("PROACTIVE"), + MinimalAction: proto.String("RESTART"), + MostDisruptiveAllowedAction: proto.String("RESTART"), + }, + }, + } + + logger.Info("creating instance group manager", "name", instanceGroupManager.Name) + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, deployment, instanceGroupManager, r.Scheme); err != nil { + return nil, fmt.Errorf("failed to set controller on instance group manager: %w", err) + } + + // Work around bug in generated struct having the wrong type for TargetSize + unstructuredInstanceGroupManager, err := k8sruntime.DefaultUnstructuredConverter.ToUnstructured(instanceGroupManager) + if err != nil { + return nil, fmt.Errorf("failed to convert instance group manager to unstructured type: %w", err) + } + + // Have to set maxReplicas as an int64 due to DeepCopy logic not handling + // int32s correctly. + maxReplicas := int64(deployment.Spec.ScaleSettings.MinReplicas) + if err := unstructured.SetNestedField(unstructuredInstanceGroupManager, maxReplicas, "spec", "targetSize"); err != nil { + return nil, fmt.Errorf("failed to set target size: %w", err) + } + + logger.Info("creating instance group manager for workload") + u := &unstructured.Unstructured{ + Object: unstructuredInstanceGroupManager, + } + u.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + if err := r.InfraClient.Create(ctx, u); err != nil { + return nil, fmt.Errorf("failed to create instance group manager: %w", err) + } + + logger.Info( + "instance group manager created", + ) + + return u, nil + } else { + instanceTemplateName, ok, err := unstructured.NestedString(instanceGroupManager.Object, "spec", "instanceTemplateRef", "name") + if !ok || err != nil { + return nil, fmt.Errorf("failed to get instance template ref from instance group manager") + } + + if instanceTemplateName != instanceTemplate.Name { + logger.Info("updating instance group manager template", "template_name", instanceTemplate.Name) + if err := unstructured.SetNestedField(instanceGroupManager.Object, instanceTemplate.Name, "spec", "instanceTemplateRef", "name"); err != nil { + return nil, fmt.Errorf("failed setting instance template ref name: %w", err) + } + + if err := r.InfraClient.Update(ctx, &instanceGroupManager); err != nil { + return nil, fmt.Errorf("failed updating instance template for instance group manager: %w", err) + } + } + return &instanceGroupManager, nil + } +} + +func (r *WorkloadDeploymentReconciler) updateDeploymentStatus( + ctx context.Context, + logger logr.Logger, + availableCondition *metav1.Condition, + deployment *computev1alpha.WorkloadDeployment, + instanceGroupManager *unstructured.Unstructured, +) (ctrl.Result, error) { + var requeueAfter time.Duration + + currentActions, ok, err := unstructured.NestedMap(instanceGroupManager.Object, "status", "currentActions") + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get instance group manager current actions: %w", err) + } else if !ok { + // Status has not been populated yet + return ctrl.Result{}, nil + } else { + totalInstances := int64(0) + stableInstances := int64(0) + for action, v := range currentActions { + i, ok := v.(int64) + if !ok { + return ctrl.Result{}, 
fmt.Errorf("unexpected type for action %s: %T", action, v) + } + totalInstances += i + if action == "none" { + stableInstances = i + } + } + + deployment.Status.Replicas = int32(totalInstances) + + deployment.Status.CurrentReplicas = int32(totalInstances) + + deployment.Status.DesiredReplicas = deployment.Spec.ScaleSettings.MinReplicas + + // TODO(jreese) derive a Ready condition if we can based on instances with + // a Ready condition. We'd need some way to drive that value from instance + // observations, though. + + if stableInstances < 1 { + logger.Info("no stable instances found") + availableCondition.Reason = "NoStableInstanceFound" + availableCondition.Message = "No stable instances found" + + // Manipulate a label on the ComputeInstanceGroupManager so that the + // KCC controller reconciles the entity. We could alternatively set the + // `cnrm.cloud.google.com/reconcile-interval-in-seconds` annotation, but + // this approach allows for more fine grained control of forced + // reconciliation, and avoids problems with multiple controllers wanting + // to influence reconciliation of a KCC resource. + // + // An annotation was originally attempted, but it did not result in a + // refresh of the instance group resource + + const timestampLabel = "compute.datumapis.com/deployment-reconciler-ts" + + groupManagerLabels := instanceGroupManager.GetLabels() + if groupManagerLabels == nil { + groupManagerLabels = map[string]string{} + } + + groupManagerTimestampUpdateRequired := false + if lastTime, ok := groupManagerLabels[timestampLabel]; ok { + t, err := strconv.ParseInt(lastTime, 10, 64) + if err != nil || time.Since(time.Unix(t, 0)) > 10*time.Second { + // If we get an error, it's likely a result of an outside influence, + // so override the value. 
+ groupManagerTimestampUpdateRequired = true + } + } else { + groupManagerTimestampUpdateRequired = true + } + + if groupManagerTimestampUpdateRequired { + groupManagerLabels[timestampLabel] = strconv.FormatInt(metav1.Now().Unix(), 10) + logger.Info("updating reconciler timestamp label on instance group manager") + instanceGroupManager.SetLabels(groupManagerLabels) + if err := r.InfraClient.Update(ctx, instanceGroupManager); err != nil { + return ctrl.Result{}, fmt.Errorf("failed updating instance group manager to update label: %w", err) + } + } + requeueAfter = 10 * time.Second + + } else { + availableCondition.Status = metav1.ConditionTrue + availableCondition.Reason = "StableInstanceFound" + availableCondition.Message = "At least one stable instances was found" + } + } + + return ctrl.Result{RequeueAfter: requeueAfter}, nil +} + +func (r *WorkloadDeploymentReconciler) checkInstanceGroupManagerReadiness( + logger logr.Logger, + availableCondition *metav1.Condition, + instanceGroupManager *unstructured.Unstructured, +) (bool, error) { + conditions, ok, err := unstructured.NestedSlice(instanceGroupManager.Object, "status", "conditions") + if err != nil { + return false, fmt.Errorf("failed to get instance group manager status conditions: %w", err) + } else if !ok { + logger.Info("instance group manager not ready yet") + availableCondition.Reason = "InstanceGroupManagerNotReady" + return false, nil + } else { + for _, c := range conditions { + cond := c.(map[string]interface{}) + if cond["type"].(string) == kcccomputev1alpha1.ReadyConditionType && + cond["status"].(string) != "True" { + logger.Info("instance group manager not ready yet") + + availableCondition.Reason = "InstanceGroupManagerNotReady" + return false, nil + } + } + } + return true, nil +} + +func (r *WorkloadDeploymentReconciler) Finalize( + ctx context.Context, + obj client.Object, +) (finalizer.Result, error) { + deployment := obj.(*computev1alpha.WorkloadDeployment) + + // Delete child entities in a sequence that does not result in exponential + // backoffs of deletion attempts that occurs when they're all deleted by GC. 
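+	// Order used below: delete the instance group manager first, then any
+	// instance templates labeled with this deployment's UID, and finally the
+	// anchor object so garbage collection can remove the remaining children.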
+ instanceGroupManagerName := fmt.Sprintf("deployment-%s", deployment.UID) + + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + instanceGroupManagerObjectKey := client.ObjectKey{ + Namespace: r.InfraClusterNamespaceName, + Name: instanceGroupManagerName, + } + if err := r.InfraClient.Get(ctx, instanceGroupManagerObjectKey, &instanceGroupManager); client.IgnoreNotFound(err) != nil { + return finalizer.Result{}, fmt.Errorf("failed fetching instance group manager: %w", err) + } + + if t := instanceGroupManager.GetCreationTimestamp(); !t.IsZero() { + if dt := instanceGroupManager.GetDeletionTimestamp(); dt.IsZero() { + if err := r.InfraClient.Delete(ctx, &instanceGroupManager); err != nil { + return finalizer.Result{}, fmt.Errorf("failed deleting instance group manager: %w", err) + } + } + } + + var instanceTemplates kcccomputev1beta1.ComputeInstanceTemplateList + if err := r.InfraClient.List( + ctx, + &instanceTemplates, + []client.ListOption{ + client.InNamespace(r.InfraClusterNamespaceName), + client.MatchingLabels{ + deploymentUIDLabel: string(deployment.UID), + }, + }..., + ); err != nil { + return finalizer.Result{}, fmt.Errorf("unable to list instance templates: %w", err) + } + + for _, instanceTemplate := range instanceTemplates.Items { + if err := r.InfraClient.Delete(ctx, &instanceTemplate); err != nil { + return finalizer.Result{}, fmt.Errorf("failed to delete instance template: %w", err) + } + } + + // Allow GC to remove the following: + // + // - Deployment specific service account + // - Deployment specific secret related entities + // - Interface specific firewall rules + + if err := crossclusterutil.DeleteAnchorForObject(ctx, r.Client, r.InfraClient, deployment, r.InfraClusterNamespaceName); err != nil { + return finalizer.Result{}, fmt.Errorf("failed deleting instance group manager anchor: %w", err) + } + + return finalizer.Result{}, nil +} diff --git a/internal/controller/workloadgateway_controller.go b/internal/controller/workloadgateway_controller.go new file mode 100644 index 0000000..63fe9b2 --- /dev/null +++ b/internal/controller/workloadgateway_controller.go @@ -0,0 +1,782 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + kcccomputev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" + kcccomputev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + "github.com/go-logr/logr" + "google.golang.org/protobuf/proto" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/finalizer" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/source" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + + "go.datum.net/infra-provider-gcp/internal/controller/k8sconfigconnector" + "go.datum.net/infra-provider-gcp/internal/crossclusterutil" + networkingv1alpha 
"go.datum.net/network-services-operator/api/v1alpha" + computev1alpha "go.datum.net/workload-operator/api/v1alpha" +) + +const gcpWorkloadFinalizer = "compute.datumapis.com/gcp-workload-controller" + +// TODO(jreese) move to indexer package in workload-operator that can be used +const deploymentWorkloadUID = "spec.workloadRef.uid" + +// WorkloadGatewayReconciler reconciles a Workload object and processes any +// gateways defined. +type WorkloadGatewayReconciler struct { + client.Client + InfraClient client.Client + Scheme *runtime.Scheme + GCPProject string + InfraClusterNamespaceName string + + finalizers finalizer.Finalizers +} + +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=workloads,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=workloads/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=compute.datumapis.com,resources=workloads/finalizers,verbs=update + +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeaddresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeaddresses/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computefirewalls,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computefirewalls/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computehealthchecks,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computehealthchecks/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computebackendservices,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computebackendservices/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computetargettcpproxies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computetargettcpproxies/status,verbs=get +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeforwardingrules,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=compute.cnrm.cloud.google.com,resources=computeforwardingrules/status,verbs=get + +func (r *WorkloadGatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + var workload computev1alpha.Workload + if err := r.Client.Get(ctx, req.NamespacedName, &workload); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + finalizationResult, err := r.finalizers.Finalize(ctx, &workload) + if err != nil { + if v, ok := err.(kerrors.Aggregate); ok && v.Is(resourceIsDeleting) { + logger.Info("resources are still deleting, requeuing") + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } else { + return ctrl.Result{}, fmt.Errorf("failed to finalize: %w", err) + } + } + if finalizationResult.Updated { + if err := r.Client.Update(ctx, &workload); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update based on finalization result: %w", err) + } + return ctrl.Result{}, nil + } + + if !workload.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + logger.Info("reconciling workload") + defer logger.Info("reconcile complete") + + return ctrl.Result{}, 
r.reconcileWorkloadGateway(ctx, logger, &workload) +} + +func (r *WorkloadGatewayReconciler) reconcileWorkloadGateway( + ctx context.Context, + logger logr.Logger, + workload *computev1alpha.Workload, +) error { + if gateway := workload.Spec.Gateway; gateway == nil { + return nil + } + + logger.Info("gateway definition found") + + // TODO(jreese) have different provisioners for each gateway class, move this + // code out. + + // TODO(jreese) break the gateway out into a WorkloadGateway resource that can + // be individually reconciled, so that we don't have multiple controllers + // trying to update the workload spec. A separate reconciler should be + // responsible for observing relevant dependent resources to determine the + // workload status. + + // TODO(jreese) handle multiple listeners + + if len(workload.Spec.Gateway.Template.Spec.Listeners) == 0 { + return fmt.Errorf("no listeners found on gateway") + } + + listener := workload.Spec.Gateway.Template.Spec.Listeners[0] + + backendPorts := getGatewayBackendPorts(workload) + + // 1. Get an IP address for the load balancer + // TODO(jreese) ipv6 + address, err := r.reconcileGatewayAddress(ctx, logger, workload) + if err != nil { + return err + } + + if workload.Status.Gateway == nil { + workload.Status.Gateway = &computev1alpha.WorkloadGatewayStatus{} + } + + if !k8sconfigconnector.IsStatusConditionTrue(address.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("address not ready yet") + _ = apimeta.SetStatusCondition(&workload.Status.Gateway.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "ListenerAddressNotReady", + ObservedGeneration: workload.Generation, + Message: "Listener address is not Ready", + }) + + if err := r.Client.Status().Update(ctx, workload); err != nil { + return fmt.Errorf("failed to update workload gateway status: %w", err) + } + + return nil + } + + if len(workload.Status.Gateway.Addresses) == 0 { + addressType := gatewayv1.AddressType("IPAddress") + workload.Status.Gateway.Addresses = []gatewayv1.GatewayStatusAddress{ + { + Type: &addressType, + Value: *address.Status.ObservedState.Address, + }, + } + if err := r.Client.Status().Update(ctx, workload); err != nil { + return fmt.Errorf("failed to update workload gateway status: %w", err) + } + + return nil + } + + // 2. Create firewall rule to allow the load balancer to reach the backends. + // + // In the current configuration, load balancing will only direct traffic to + // the primary interface. A more complex move to network endpoint groups + // would be required to support load balancing into other interfaces. Given + // this, we'll only enable the firewall rule on the network that the first + // interface is attached to. + + if _, err := r.reconcileGatewayLBFirewall(ctx, logger, workload, backendPorts); err != nil { + return err + } + + // 3. Create external load balancers for the backend ports + // TODO(jreese) make sure that multiple backend services can reuse the same + // address on different ports. 
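+	// For each backend port, reconcileGatewayBackendServices creates a TCP
+	// health check, a backend service, a target TCP proxy, and a forwarding
+	// rule that binds the listener port to the address reserved above.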
+ + if err := r.reconcileGatewayBackendServices(ctx, logger, workload, backendPorts, address, int32(listener.Port)); err != nil { + return err + } + + return nil +} + +func (r *WorkloadGatewayReconciler) reconcileGatewayAddress( + ctx context.Context, + logger logr.Logger, + workload *computev1alpha.Workload, +) (kcccomputev1beta1.ComputeAddress, error) { + + addressName := fmt.Sprintf("workload-gw-%s", workload.UID) + var address kcccomputev1beta1.ComputeAddress + addressObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: addressName, + } + if err := r.InfraClient.Get(ctx, addressObjectKey, &address); client.IgnoreNotFound(err) != nil { + return address, fmt.Errorf("failed fetching IP address: %w", err) + } + + if address.CreationTimestamp.IsZero() { + logger.Info("creating global IP address") + address := kcccomputev1beta1.ComputeAddress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workload.Namespace, + Name: addressName, + Annotations: map[string]string{ + GCPProjectAnnotation: r.GCPProject, + }, + }, + Spec: kcccomputev1beta1.ComputeAddressSpec{ + Location: "global", + // TODO(jreese) support internal load balancers too - would need to + // define multiple gateways on the workload. + AddressType: proto.String("EXTERNAL"), + IpVersion: proto.String("IPV4"), + // Required for global load balancers + NetworkTier: proto.String("PREMIUM"), + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, workload, &address, r.Scheme); err != nil { + return address, fmt.Errorf("failed failed to set owner on IP address: %w", err) + } + + if err := r.InfraClient.Create(ctx, &address); err != nil { + return address, fmt.Errorf("failed to create IP address: %w", err) + } + } + + return address, nil +} + +func (r *WorkloadGatewayReconciler) reconcileGatewayLBFirewall( + ctx context.Context, + logger logr.Logger, + workload *computev1alpha.Workload, + backendPorts sets.Set[computev1alpha.NamedPort], +) (kcccomputev1beta1.ComputeFirewall, error) { + firewallName := fmt.Sprintf("workload-gw-hc-%s", workload.UID) + + var firewall kcccomputev1beta1.ComputeFirewall + firewallObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: firewallName, + } + + if err := r.InfraClient.Get(ctx, firewallObjectKey, &firewall); client.IgnoreNotFound(err) != nil { + return firewall, fmt.Errorf("failed fetching firewall rule for LB backends: %w", err) + } + + if firewall.CreationTimestamp.IsZero() { + logger.Info("creating firewall rule for LB access", "firewall_rule", firewallName) + primaryNetworkInterface := workload.Spec.Template.Spec.NetworkInterfaces[0] + + var primaryNetwork networkingv1alpha.Network + primaryNetworkObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: primaryNetworkInterface.Network.Name, + } + if err := r.InfraClient.Get(ctx, primaryNetworkObjectKey, &primaryNetwork); err != nil { + return firewall, fmt.Errorf("failed fetching network for primary network interface: %w", err) + } + + firewall := kcccomputev1beta1.ComputeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workload.Namespace, + Name: firewallName, + Annotations: map[string]string{ + GCPProjectAnnotation: r.GCPProject, + }, + }, + Spec: kcccomputev1beta1.ComputeFirewallSpec{ + Description: proto.String(fmt.Sprintf("gateway policy for workload-%s", workload.UID)), + Direction: proto.String("INGRESS"), + NetworkRef: kcccomputev1alpha1.ResourceRef{ + Namespace: workload.Namespace, + Name: fmt.Sprintf("network-%s", primaryNetwork.UID), + }, + Priority: 
proto.Int64(1000), // Default priority, but might want to adjust + SourceRanges: []string{ + // See https://cloud.google.com/load-balancing/docs/https#firewall-rules + // TODO(jreese) add ipv6 + "130.211.0.0/22", + "35.191.0.0/16", + }, + TargetTags: []string{ + fmt.Sprintf("workload-%s", workload.UID), + }, + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, workload, &firewall, r.Scheme); err != nil { + return firewall, fmt.Errorf("failed failed to set owner on firewall: %w", err) + } + + for _, namedPort := range backendPorts.UnsortedList() { + ipProtocol := "tcp" + if namedPort.Protocol != nil { + ipProtocol = strings.ToLower(string(*namedPort.Protocol)) + } + + firewall.Spec.Allow = append(firewall.Spec.Allow, kcccomputev1beta1.FirewallAllow{ + Protocol: ipProtocol, + Ports: []string{strconv.Itoa(int(namedPort.Port))}, + }) + } + + if err := r.InfraClient.Create(ctx, &firewall); err != nil { + return firewall, fmt.Errorf("failed to create gateway firewall rule: %w", err) + } + } + + return firewall, nil +} + +func (r *WorkloadGatewayReconciler) reconcileGatewayBackendServices( + ctx context.Context, + logger logr.Logger, + workload *computev1alpha.Workload, + backendPorts sets.Set[computev1alpha.NamedPort], + address kcccomputev1beta1.ComputeAddress, + listenerPort int32, +) (err error) { + readyCondition := metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "GatewayResourcesNotReady", + ObservedGeneration: workload.Generation, + Message: "Gateway resources are not ready", + } + + defer func() { + if err != nil { + // Don't update the status if errors are encountered + return + } + statusChanged := apimeta.SetStatusCondition(&workload.Status.Gateway.Conditions, readyCondition) + + if statusChanged { + err = r.Client.Status().Update(ctx, workload) + } + }() + + readyBackendServices := 0 + for _, namedPort := range backendPorts.UnsortedList() { + healthCheckName := fmt.Sprintf("workload-gw-hc-%s-%d", workload.UID, namedPort.Port) + + var healthCheck kcccomputev1beta1.ComputeHealthCheck + healthCheckObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: healthCheckName, + } + if err := r.InfraClient.Get(ctx, healthCheckObjectKey, &healthCheck); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("failed fetching health check: %w", err) + } + + if healthCheck.CreationTimestamp.IsZero() { + logger.Info("creating health check", "health_check", healthCheckName) + healthCheck = kcccomputev1beta1.ComputeHealthCheck{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workload.Namespace, + Name: healthCheckName, + Annotations: map[string]string{ + GCPProjectAnnotation: r.GCPProject, + }, + }, + Spec: kcccomputev1beta1.ComputeHealthCheckSpec{ + Location: "global", + TcpHealthCheck: &kcccomputev1beta1.HealthcheckTcpHealthCheck{ + Port: proto.Int64(int64(namedPort.Port)), + }, + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, workload, &healthCheck, r.Scheme); err != nil { + return fmt.Errorf("failed failed to set owner on health check: %w", err) + } + + if err := r.InfraClient.Create(ctx, &healthCheck); err != nil { + return fmt.Errorf("failed to create health check: %w", err) + } + + return nil + } + + if !k8sconfigconnector.IsStatusConditionTrue(healthCheck.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + readyCondition.Reason = "HealthCheckNotReady" + return nil + } + + // 4. Reconcile `backend-service` load balancer for each named port. 
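+		// Backend service names combine the workload UID and the named port,
+		// e.g. "workload-<uid>-http" for a port named "http".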
+ backendServiceName := fmt.Sprintf("workload-%s-%s", workload.UID, namedPort.Name) + + backendService := &kcccomputev1beta1.ComputeBackendService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workload.Namespace, + Name: backendServiceName, + }, + } + + backendServiceResult, err := r.reconcileBackendService(ctx, logger, workload, namedPort, &healthCheck, backendService) + if err != nil { + return fmt.Errorf("failed to create or update backend service: %w", err) + } + + if backendServiceResult != controllerutil.OperationResultNone { + logger.Info("backend service mutated", "result", backendServiceResult) + } + + if !k8sconfigconnector.IsStatusConditionTrue(backendService.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("backend service not ready yet") + readyCondition.Reason = "BackendServiceNotReady" + return nil + } + + // 5. Create a "target tcp proxy" for the backend service + // TODO(jreese) probably need to use a hash + targetTCPProxyName := fmt.Sprintf("workload-gw-%s-%d", workload.UID, namedPort.Port) + + var targetTCPProxy kcccomputev1beta1.ComputeTargetTCPProxy + targetTCPProxyObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: targetTCPProxyName, + } + + if err := r.InfraClient.Get(ctx, targetTCPProxyObjectKey, &targetTCPProxy); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("failed fetching target TCP proxy: %w", err) + } + + if targetTCPProxy.CreationTimestamp.IsZero() { + logger.Info("creating target TCP proxy") + targetTCPProxy = kcccomputev1beta1.ComputeTargetTCPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workload.Namespace, + Name: targetTCPProxyName, + Annotations: map[string]string{ + GCPProjectAnnotation: r.GCPProject, + }, + }, + Spec: kcccomputev1beta1.ComputeTargetTCPProxySpec{ + BackendServiceRef: kcccomputev1alpha1.ResourceRef{ + Namespace: backendService.Namespace, + Name: backendService.Name, + }, + ProxyHeader: proto.String("NONE"), + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, workload, &targetTCPProxy, r.Scheme); err != nil { + return fmt.Errorf("failed failed to set owner on target TCP proxy: %w", err) + } + + if err := r.InfraClient.Create(ctx, &targetTCPProxy); err != nil { + return fmt.Errorf("failed to create target TCP proxy: %w", err) + } + + return nil + } + + if !k8sconfigconnector.IsStatusConditionTrue(targetTCPProxy.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("target TCP proxy not ready yet") + readyCondition.Reason = "TargetTCPProxyNotReady" + return nil + } + + // 6. 
Create a forwarding rule for the address and port toward the TCP LB + forwardingRuleName := fmt.Sprintf("workload-gw-%s-%d", workload.UID, namedPort.Port) + + var forwardingRule kcccomputev1beta1.ComputeForwardingRule + forwardingRuleObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: forwardingRuleName, + } + + if err := r.InfraClient.Get(ctx, forwardingRuleObjectKey, &forwardingRule); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("failed fetching forwarding rule for TCP proxy: %w", err) + } + + if forwardingRule.CreationTimestamp.IsZero() { + logger.Info("creating forwarding rule", "forwarding_rule", forwardingRuleName) + + forwardingRule := kcccomputev1beta1.ComputeForwardingRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workload.Namespace, + Name: forwardingRuleName, + Annotations: map[string]string{ + GCPProjectAnnotation: r.GCPProject, + }, + }, + Spec: kcccomputev1beta1.ComputeForwardingRuleSpec{ + Location: "global", + LoadBalancingScheme: proto.String("EXTERNAL_MANAGED"), + Target: &kcccomputev1beta1.ForwardingruleTarget{ + TargetTCPProxyRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: targetTCPProxy.Namespace, + Name: targetTCPProxy.Name, + }, + }, + NetworkTier: proto.String("PREMIUM"), + IpProtocol: proto.String("TCP"), + IpAddress: &kcccomputev1beta1.ForwardingruleIpAddress{ + AddressRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: address.Namespace, + Name: address.Name, + }, + }, + PortRange: proto.String(fmt.Sprintf("%d-%d", listenerPort, listenerPort)), + }, + } + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, workload, &forwardingRule, r.Scheme); err != nil { + return fmt.Errorf("failed failed to set owner on forwarding rule: %w", err) + } + + if err := r.InfraClient.Create(ctx, &forwardingRule); err != nil { + return fmt.Errorf("failed to create forwarding rule for TCP proxy: %w", err) + } + } + + if !k8sconfigconnector.IsStatusConditionTrue(forwardingRule.Status.Conditions, kcccomputev1alpha1.ReadyConditionType) { + logger.Info("forwarding rule not ready yet") + readyCondition.Reason = "ForwardingRuleNotReady" + return nil + } + + logger.Info("forwarding rule is ready") + readyBackendServices++ + } + + if readyBackendServices == len(backendPorts) { + readyCondition.Reason = "GatewayResourcesReady" + readyCondition.Message = "All gateway resources ready" + readyCondition.Status = metav1.ConditionTrue + } + + return nil +} + +func (r *WorkloadGatewayReconciler) reconcileBackendService( + ctx context.Context, + logger logr.Logger, + workload *computev1alpha.Workload, + namedPort computev1alpha.NamedPort, + healthCheck *kcccomputev1beta1.ComputeHealthCheck, + backendService *kcccomputev1beta1.ComputeBackendService, +) (controllerutil.OperationResult, error) { + return controllerutil.CreateOrUpdate(ctx, r.InfraClient, backendService, func() error { + if backendService.CreationTimestamp.IsZero() { + logger.Info("creating backend service") + } else { + logger.Info("updating backend service") + } + + // Add a backend to the backend service for each workload deployment found + listOpts := client.MatchingFields{ + deploymentWorkloadUID: string(workload.UID), + } + var deployments computev1alpha.WorkloadDeploymentList + if err := r.Client.List(ctx, &deployments, listOpts); err != nil { + return fmt.Errorf("failed to list worklaod deployments: %w", err) + } + + if len(deployments.Items) == 0 { + logger.Info("no workload deployments found") + return nil + } + + var backends []kcccomputev1beta1.BackendserviceBackend + + 
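+		// One backend is added per WorkloadDeployment, referencing the instance
+		// group created by that deployment's instance group manager and using
+		// UTILIZATION balancing capped at 80%.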
for _, deployment := range deployments.Items { + + // KCC can't point to a ComputeInstanceGroupManager, even with the Kind + // field being set in the InstanceGroupRef, so we need to look them up. + + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + instanceGroupManagerObjectKey := client.ObjectKey{ + Namespace: workload.Namespace, + Name: fmt.Sprintf("deployment-%s", deployment.UID), + } + if err := r.InfraClient.Get(ctx, instanceGroupManagerObjectKey, &instanceGroupManager); err != nil { + return fmt.Errorf("failed fetching instance group manager for deployment: %w", err) + } + + instanceGroup, ok, err := unstructured.NestedString(instanceGroupManager.Object, "status", "instanceGroup") + if err != nil { + return fmt.Errorf("failed reading instance group from instance group manager: %w", err) + } else if !ok { + return fmt.Errorf("did not find instance group in instance group manager status") + } + + backend := kcccomputev1beta1.BackendserviceBackend{ + BalancingMode: proto.String("UTILIZATION"), + MaxUtilization: proto.Float64(.8), + + Group: kcccomputev1beta1.BackendserviceGroup{ + InstanceGroupRef: &kcccomputev1alpha1.ResourceRef{ + External: instanceGroup, + }, + }, + } + + backends = append(backends, backend) + } + + if backendService.Annotations == nil { + backendService.Annotations = map[string]string{ + GCPProjectAnnotation: r.GCPProject, + } + } + + backendService.Spec.Location = "global" + backendService.Spec.LoadBalancingScheme = proto.String("EXTERNAL_MANAGED") + backendService.Spec.Protocol = proto.String("TCP") + // TODO(jreese) ipv6 support + // TODO(jreese) the following field doesn't exist in the struct, do we need + // it? + // IpAddressSelectionPolicy: "IPV4_ONLY", + // TODO(jreese) allow tweaking this. Possibly from readiness probe definitions? + backendService.Spec.TimeoutSec = proto.Int64(300) + backendService.Spec.PortName = proto.String(namedPort.Name) + backendService.Spec.HealthChecks = []kcccomputev1beta1.BackendserviceHealthChecks{ + { + HealthCheckRef: &kcccomputev1alpha1.ResourceRef{ + Namespace: workload.Namespace, + Name: healthCheck.Name, + }, + }, + } + + backendService.Spec.Backend = backends + + if err := crossclusterutil.SetControllerReference(ctx, r.InfraClient, workload, backendService, r.Scheme); err != nil { + return fmt.Errorf("failed failed to set owner on backend service: %w", err) + } + + return nil + }) +} + +var resourceIsDeleting = errors.New("resource is deleting") + +func (r *WorkloadGatewayReconciler) Finalize( + ctx context.Context, + obj client.Object, +) (finalizer.Result, error) { + // workload := obj.(*computev1alpha.Workload) + + // TODO(jreese) Delete child entities in a sequence that does not result in + // exponential backoffs of deletion attempts that occurs when they're all + // deleted by GC. 
+ // + // Make sure to update the status conditions + + if err := crossclusterutil.DeleteAnchorForObject(ctx, r.Client, r.InfraClient, obj, r.InfraClusterNamespaceName); err != nil { + return finalizer.Result{}, fmt.Errorf("failed deleting instance group manager anchor: %w", err) + } + + return finalizer.Result{}, nil +} + +func getGatewayBackendPorts(workload *computev1alpha.Workload) sets.Set[computev1alpha.NamedPort] { + runtime := workload.Spec.Template.Spec.Runtime + namedPorts := map[string]computev1alpha.NamedPort{} + if runtime.Sandbox != nil { + for _, c := range runtime.Sandbox.Containers { + for _, namedPort := range c.Ports { + namedPorts[namedPort.Name] = namedPort + } + + } + } + + if runtime.VirtualMachine != nil { + for _, namedPort := range runtime.VirtualMachine.Ports { + namedPorts[namedPort.Name] = namedPort + } + } + + backendPorts := sets.Set[computev1alpha.NamedPort]{} + + for _, tcpRoute := range workload.Spec.Gateway.TCPRoutes { + for _, rule := range tcpRoute.Rules { + for _, backendRef := range rule.BackendRefs { + // Consider looking to see if backendRef.Port is set, if we end up + // not forcing users to leverage a named port. + if namedPort, ok := namedPorts[string(backendRef.Name)]; !ok { + panic("did not find named port for backend ref") + } else { + backendPorts.Insert(namedPort) + } + } + } + } + return backendPorts +} + +// SetupWithManager sets up the controller with the Manager. +func (r *WorkloadGatewayReconciler) SetupWithManager(mgr ctrl.Manager, infraCluster cluster.Cluster) error { + + r.finalizers = finalizer.NewFinalizers() + if err := r.finalizers.Register(gcpWorkloadFinalizer, r); err != nil { + return fmt.Errorf("failed to register finalizer: %w", err) + } + + // TODO(jreese) move to indexer package + + err := mgr.GetFieldIndexer().IndexField(context.Background(), &computev1alpha.WorkloadDeployment{}, deploymentWorkloadUID, func(o client.Object) []string { + return []string{ + string(o.(*computev1alpha.WorkloadDeployment).Spec.WorkloadRef.UID), + } + }) + if err != nil { + return fmt.Errorf("failed to add workload deployment field indexer: %w", err) + } + + // Watch the unstructured form of an instance group manager, as the generated + // types are not aligned with the actual CRD. + var instanceGroupManager unstructured.Unstructured + instanceGroupManager.SetGroupVersionKind(kcccomputev1beta1.ComputeInstanceGroupManagerGVK) + + return ctrl.NewControllerManagedBy(mgr). + For(&computev1alpha.Workload{}). + Owns(&computev1alpha.WorkloadDeployment{}). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeAddress{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeAddress](mgr.GetScheme(), &computev1alpha.Workload{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeFirewall{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeFirewall](mgr.GetScheme(), &computev1alpha.Workload{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeHealthCheck{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeHealthCheck](mgr.GetScheme(), &computev1alpha.Workload{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeBackendService{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeBackendService](mgr.GetScheme(), &computev1alpha.Workload{}), + )). 
+ WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeTargetTCPProxy{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeTargetTCPProxy](mgr.GetScheme(), &computev1alpha.Workload{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &kcccomputev1beta1.ComputeForwardingRule{}, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*kcccomputev1beta1.ComputeForwardingRule](mgr.GetScheme(), &computev1alpha.Workload{}), + )). + WatchesRawSource(source.TypedKind( + infraCluster.GetCache(), + &instanceGroupManager, + crossclusterutil.TypedEnqueueRequestForUpstreamOwner[*unstructured.Unstructured](mgr.GetScheme(), &computev1alpha.Workload{}), + )). + Complete(r) +} diff --git a/internal/crossclusterutil/controllerutil.go b/internal/crossclusterutil/controllerutil.go new file mode 100644 index 0000000..4f5785a --- /dev/null +++ b/internal/crossclusterutil/controllerutil.go @@ -0,0 +1,137 @@ +package crossclusterutil + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// SetControllerReference assists with entity ownership across control planes. +// +// This function will create an "anchor" entity in the API server accessible via +// the provided client to represent the owner that exists in a separate API +// server. This is particularly useful for relying on garbage collection for +// entity destruction versus writing direct teardown logic. +// +// In addition, labels will be added to the controlled entity to identify the +// owner in the upstream control plane. These labels will be used by the +// TypedEnqueueRequestForUpstreamOwner handler to enqueue reconciliations. +func SetControllerReference( + ctx context.Context, + c client.Client, + owner, + controlled client.Object, + scheme *runtime.Scheme, + opts ...controllerutil.OwnerReferenceOption, +) error { + + if owner.GetNamespace() == "" || controlled.GetNamespace() == "" { + return fmt.Errorf("cluster scoped resource controllers are not supported") + } + + // For simplicity, we use a ConfigMap for an anchor. This may change to a + // separate type in the future if ConfigMap bloat causes an issue in caches. 
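+	// The anchor is located (or created) via the following labels; the values
+	// shown are illustrative, e.g. for a Workload named "web" in the upstream
+	// namespace "team-a":
+	//
+	//   compute.datumapis.com/upstream-group:     compute.datumapis.com
+	//   compute.datumapis.com/upstream-kind:      Workload
+	//   compute.datumapis.com/upstream-name:      web
+	//   compute.datumapis.com/upstream-namespace: team-a
+	//
+	// The same labels are mirrored onto the controlled object so that
+	// TypedEnqueueRequestForUpstreamOwner can map events back to the owner.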
+ + gvk, err := apiutil.GVKForObject(owner.(runtime.Object), scheme) + if err != nil { + return err + } + + anchorLabels := map[string]string{ + UpstreamOwnerGroupLabel: gvk.Group, + UpstreamOwnerKindLabel: gvk.Kind, + UpstreamOwnerNameLabel: owner.GetName(), + UpstreamOwnerNamespaceLabel: owner.GetNamespace(), + } + + listOpts := []client.ListOption{ + client.InNamespace(controlled.GetNamespace()), + client.MatchingLabels(anchorLabels), + } + + var configMaps corev1.ConfigMapList + if err := c.List(ctx, &configMaps, listOpts...); err != nil { + return fmt.Errorf("failed listing configmaps: %w", err) + } + + var anchorConfigMap corev1.ConfigMap + if len(configMaps.Items) == 0 { + // create configmap + anchorConfigMap = corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: controlled.GetNamespace(), + GenerateName: fmt.Sprintf("anchor-%s-", owner.GetName()), + Labels: anchorLabels, + }, + } + + if err := c.Create(ctx, &anchorConfigMap); err != nil { + return fmt.Errorf("failed creating anchor configmap: %w", err) + } + + } else if len(configMaps.Items) > 1 { + // Never expect this to happen, but better to stop doing any work if it does. + return fmt.Errorf("expected 1 anchor configmap, got: %d", len(configMaps.Items)) + } else { + anchorConfigMap = configMaps.Items[0] + } + + if err := controllerutil.SetOwnerReference(&anchorConfigMap, controlled, scheme, opts...); err != nil { + return fmt.Errorf("failed setting anchor owner reference: %w", err) + } + + labels := controlled.GetLabels() + if labels == nil { + labels = map[string]string{} + } + labels[UpstreamOwnerGroupLabel] = anchorLabels[UpstreamOwnerGroupLabel] + labels[UpstreamOwnerKindLabel] = anchorLabels[UpstreamOwnerKindLabel] + labels[UpstreamOwnerNameLabel] = anchorLabels[UpstreamOwnerNameLabel] + labels[UpstreamOwnerNamespaceLabel] = anchorLabels[UpstreamOwnerNamespaceLabel] + controlled.SetLabels(labels) + + return nil +} + +// DeleteAnchorForObject will delete the anchor configmap associated with the +// provided owner, which will help drive GC of other entities. +func DeleteAnchorForObject( + ctx context.Context, + upstreamClient client.Client, + infraClusterClient client.Client, + owner client.Object, + infraClusterNamespaceName string, +) error { + + listOpts := []client.ListOption{ + client.InNamespace(infraClusterNamespaceName), + client.MatchingLabels{ + UpstreamOwnerGroupLabel: owner.GetObjectKind().GroupVersionKind().Group, + UpstreamOwnerKindLabel: owner.GetObjectKind().GroupVersionKind().Kind, + UpstreamOwnerNameLabel: owner.GetName(), + UpstreamOwnerNamespaceLabel: owner.GetNamespace(), + }, + } + + var configMaps corev1.ConfigMapList + if err := infraClusterClient.List(ctx, &configMaps, listOpts...); err != nil { + return fmt.Errorf("failed listing configmaps: %w", err) + } + + if len(configMaps.Items) == 0 { + return nil + } + + if len(configMaps.Items) > 1 { + // Never expect this to happen, but better to stop doing any work if it does. 
+ return fmt.Errorf("expected 1 anchor configmap, got: %d", len(configMaps.Items)) + } + + return infraClusterClient.Delete(ctx, &configMaps.Items[0]) +} diff --git a/internal/crossclusterutil/controllerutil_test.go b/internal/crossclusterutil/controllerutil_test.go new file mode 100644 index 0000000..21a6f72 --- /dev/null +++ b/internal/crossclusterutil/controllerutil_test.go @@ -0,0 +1,63 @@ +package crossclusterutil + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" +) + +func TestSetControllerReference(t *testing.T) { + ctx := context.TODO() + testScheme := scheme.Scheme + require.NoError(t, corev1.AddToScheme(testScheme)) // Register corev1 types + + // Create fake client + fakeClient := fake.NewClientBuilder(). + WithInterceptorFuncs(interceptor.Funcs{ + Create: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + if cm, ok := obj.(*corev1.ConfigMap); ok && cm.GenerateName != "" { + cm.Name = names.SimpleNameGenerator.GenerateName(cm.GenerateName) + } + return client.Create(ctx, obj, opts...) + }, + }). + WithScheme(testScheme). + Build() + + owner := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "upstream-owner", + Namespace: "test-owner-namespace", + UID: uuid.NewUUID(), + }, + } + controlled := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "controlled", + Namespace: "test-namespace", + UID: uuid.NewUUID(), + }, + } + + err := SetControllerReference(ctx, fakeClient, owner, controlled, testScheme) + require.NoError(t, err) + + // Validate owner reference + controlledOwnerReferences := controlled.GetOwnerReferences() + require.Len(t, controlledOwnerReferences, 1) + assert.Contains(t, controlledOwnerReferences[0].Name, owner.Name) + assert.Equal(t, "", controlled.Labels[UpstreamOwnerGroupLabel]) + assert.Equal(t, "ConfigMap", controlled.Labels[UpstreamOwnerKindLabel]) + assert.Equal(t, owner.Name, controlled.Labels[UpstreamOwnerNameLabel]) + assert.Equal(t, owner.Namespace, controlled.Labels[UpstreamOwnerNamespaceLabel]) +} diff --git a/internal/crossclusterutil/enqueue_upstream_owner.go b/internal/crossclusterutil/enqueue_upstream_owner.go new file mode 100644 index 0000000..0e4f249 --- /dev/null +++ b/internal/crossclusterutil/enqueue_upstream_owner.go @@ -0,0 +1,109 @@ +package crossclusterutil + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ handler.EventHandler = &enqueueRequestForOwner[client.Object]{} + +type empty struct{} + +// TypedEnqueueRequestForUpstreamOwner enqueues Requests for the upstream Owners of an object. +// +// This handler depends on the `compute.datumapis.com/upstream-namespace` label +// to exist on the resource for the event. 
+func TypedEnqueueRequestForUpstreamOwner[object client.Object](scheme *runtime.Scheme, ownerType client.Object) handler.TypedEventHandler[object, reconcile.Request] { + e := &enqueueRequestForOwner[object]{ + ownerType: ownerType, + } + if err := e.parseOwnerTypeGroupKind(scheme); err != nil { + panic(err) + } + return e +} + +type enqueueRequestForOwner[object client.Object] struct { + // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + ownerType runtime.Object + + // groupKind is the cached Group and Kind from OwnerType + groupKind schema.GroupKind +} + +// Create implements EventHandler. +func (e *enqueueRequestForOwner[object]) Create(ctx context.Context, evt event.TypedCreateEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Update implements EventHandler. +func (e *enqueueRequestForOwner[object]) Update(ctx context.Context, evt event.TypedUpdateEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.ObjectOld, reqs) + e.getOwnerReconcileRequest(evt.ObjectNew, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Delete implements EventHandler. +func (e *enqueueRequestForOwner[object]) Delete(ctx context.Context, evt event.TypedDeleteEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Generic implements EventHandler. +func (e *enqueueRequestForOwner[object]) Generic(ctx context.Context, evt event.TypedGenericEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false +// if the OwnerType could not be parsed using the scheme. +func (e *enqueueRequestForOwner[object]) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { + // Get the kinds of the type + kinds, _, err := scheme.ObjectKinds(e.ownerType) + if err != nil { + return err + } + // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. + if len(kinds) != 1 { + return fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.ownerType, kinds) + } + // Cache the Group and Kind for the OwnerType + e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} + return nil +} + +// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile +// owners of object that match e.OwnerType. 
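+// A request is produced only when the object's upstream-group and
+// upstream-kind labels match the configured owner type; the request's name
+// and namespace are taken from the upstream-name and upstream-namespace
+// labels.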
+func (e *enqueueRequestForOwner[object]) getOwnerReconcileRequest(obj metav1.Object, result map[reconcile.Request]empty) { + labels := obj.GetLabels() + if labels[UpstreamOwnerKindLabel] == e.groupKind.Kind && labels[UpstreamOwnerGroupLabel] == e.groupKind.Group { + request := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: labels[UpstreamOwnerNameLabel], + Namespace: labels[UpstreamOwnerNamespaceLabel], + }} + result[request] = empty{} + } +} diff --git a/internal/crossclusterutil/labels.go b/internal/crossclusterutil/labels.go new file mode 100644 index 0000000..a56e972 --- /dev/null +++ b/internal/crossclusterutil/labels.go @@ -0,0 +1,8 @@ +package crossclusterutil + +const ( + UpstreamOwnerGroupLabel = "compute.datumapis.com/upstream-group" + UpstreamOwnerKindLabel = "compute.datumapis.com/upstream-kind" + UpstreamOwnerNameLabel = "compute.datumapis.com/upstream-name" + UpstreamOwnerNamespaceLabel = "compute.datumapis.com/upstream-namespace" +) diff --git a/internal/crossclusterutil/namespace.go b/internal/crossclusterutil/namespace.go new file mode 100644 index 0000000..b01fbc4 --- /dev/null +++ b/internal/crossclusterutil/namespace.go @@ -0,0 +1,22 @@ +package crossclusterutil + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func InfraClusterNamespaceName(ns corev1.Namespace) string { + return fmt.Sprintf("ns-%s", ns.UID) +} + +func InfraClusterNamespaceNameFromUpstream(ctx context.Context, c client.Client, name string) (string, error) { + var ns corev1.Namespace + if err := c.Get(ctx, client.ObjectKey{Name: name}, &ns); err != nil { + return "", fmt.Errorf("failed fetching upstream namespace: %w", err) + } + + return InfraClusterNamespaceName(ns), nil +} diff --git a/internal/locationutil/location.go b/internal/locationutil/location.go new file mode 100644 index 0000000..26be63e --- /dev/null +++ b/internal/locationutil/location.go @@ -0,0 +1,38 @@ +package locationutil + +import ( + "context" + "fmt" + + "sigs.k8s.io/controller-runtime/pkg/client" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// GetLocation returns the location for the provided location reference, and +// whether or not the resource associated with the location should be processed. +func GetLocation( + ctx context.Context, + c client.Client, + locationRef networkingv1alpha.LocationReference, + locationClassName string, +) (*networkingv1alpha.Location, bool, error) { + var location networkingv1alpha.Location + locationObjectKey := client.ObjectKey{ + Namespace: locationRef.Namespace, + Name: locationRef.Name, + } + if err := c.Get(ctx, locationObjectKey, &location); err != nil { + return nil, false, fmt.Errorf("failed fetching location: %w", err) + } + + if location.Spec.Provider.GCP == nil { + return &location, false, nil + } + + if len(locationClassName) == 0 { + return &location, true, nil + } + + return &location, location.Spec.LocationClassName == locationClassName, nil +} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go new file mode 100644 index 0000000..d3d7878 --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package e2e + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Run e2e tests using the Ginkgo runner. 
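+// The specs assume an existing cluster reachable via kubectl (CI provisions
+// one with kind); they build and deploy the manager image and then wait for
+// the controller-manager pod to reach the Running phase.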
+func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting infra-provider-gcp suite\n") + RunSpecs(t, "e2e suite") +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go new file mode 100644 index 0000000..ce05b87 --- /dev/null +++ b/test/e2e/e2e_test.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package e2e + +import ( + "fmt" + "os/exec" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "go.datum.net/infra-provider-gcp/test/utils" +) + +const namespace = "infra-provider-gcp-system" + +var _ = Describe("controller", Ordered, func() { + BeforeAll(func() { + By("installing prometheus operator") + Expect(utils.InstallPrometheusOperator()).To(Succeed()) + + By("installing the cert-manager") + Expect(utils.InstallCertManager()).To(Succeed()) + + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + AfterAll(func() { + By("uninstalling the Prometheus manager bundle") + utils.UninstallPrometheusOperator() + + By("uninstalling the cert-manager bundle") + utils.UninstallCertManager() + + By("removing manager namespace") + cmd := exec.Command("kubectl", "delete", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + Context("Operator", func() { + It("should run successfully", func() { + var controllerPodName string + var err error + + // projectimage stores the name of the image used in the example + var projectimage = "example.com/infra-provider-gcp:v0.0.1" + + By("building the manager(Operator) image") + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the manager(Operator) image on Kind") + err = utils.LoadImageToKindClusterWithName(projectimage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func() error { + // Get pod name + + cmd = exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + podNames := utils.GetNonEmptyLines(string(podOutput)) + if len(podNames) != 1 { + return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) + } + controllerPodName = podNames[0] + ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) + + // Validate pod status + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + status, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) != "Running" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + + }) + }) +}) diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 
0000000..27b53ad --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package utils + +import ( + "fmt" + "os" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo/v2" //nolint:golint,revive +) + +const ( + prometheusOperatorVersion = "v0.72.0" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.14.4" + certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. +func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "create", "-f", url) + _, err := Run(cmd) + return err +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) ([]byte, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) + } + + return output, nil +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. + cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// LoadImageToKindClusterWithName loads a local docker image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + cluster := "kind" + if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { + cluster = v + } + kindOptions := []string{"load", "docker-image", name, "--name", cluster} + cmd := exec.Command("kind", kindOptions...) + _, err := Run(cmd) + return err +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. 
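+// For example, "a\n\nb\n" yields []string{"a", "b"}.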
+func GetNonEmptyLines(output string) []string {
+	var res []string
+	elements := strings.Split(output, "\n")
+	for _, element := range elements {
+		if element != "" {
+			res = append(res, element)
+		}
+	}
+
+	return res
+}
+
+// GetProjectDir returns the root directory of the project.
+func GetProjectDir() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return wd, err
+	}
+	wd = strings.ReplaceAll(wd, "/test/e2e", "")
+	return wd, nil
+}