diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index abc9d64d0..2160f518d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,6 +30,11 @@ jobs: go-version: [1.14.x] os: [ubuntu-latest, windows-latest] runs-on: ${{ matrix.os }} + env: + AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} + AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} steps: - name: Install Go uses: actions/setup-go@v1 @@ -45,6 +50,12 @@ jobs: - name: Run generator CI (Linux) if: ${{ runner.os == 'Linux' }} run: make -C ./hack/generator ci + - name: Make generated controller integration tests (and unit tests) + if: ${{ runner.os == 'Linux' && env.AZURE_TENANT_ID != '' }} + run: make -C ./hack/generated test-int-cover + - name: Make generated controller test + if: ${{ runner.os == 'Linux' && env.AZURE_TENANT_ID == '' }} + run: make -C ./hack/generated test-cover - name: Test generator (Windows) if: ${{ runner.os == 'Windows' }} # Makefile not supported. Linux will run lints so diff --git a/Makefile b/Makefile index 5696fecb7..e4e3d5ad5 100644 --- a/Makefile +++ b/Makefile @@ -27,12 +27,12 @@ test test-int test-cover test-cover-int: export TEST_ASSET_ETCD = $(ETCD) test: $(KUBECTL) $(KUBE_APISERVER) $(ETCD) lint header-check ## Run tests $(GO) test -v ./... -test-int: .env $(KUBECTL) $(KUBE_APISERVER) $(ETCD) header-check lint ## Run integration tests +test-int: $(ROOT_DIR)/.env $(KUBECTL) $(KUBE_APISERVER) $(ETCD) header-check lint ## Run integration tests # MUST be executed as single command, or env vars will not propagate to test execution . .env && $(GO) test -v ./... -tags integration .env: ## create a service principal and save the identity to .env for use in integration tests (requries jq and az) - ./scripts/create_testing_creds.sh + $(SCRIPTS_DIR)/create_testing_creds.sh test-cover: $(KUBECTL) $(KUBE_APISERVER) $(ETCD) header-check lint ## Run tests w/ code coverage (./cover.out) $(GO) test ./... -coverprofile=cover.out -coverpkg=./... @@ -95,7 +95,7 @@ generate: manifests $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Generate code ## -------------------------------------- .PHONY: tilt-up -tilt-up: kind-create .env ## start tilt and build kind cluster if needed +tilt-up: kind-create $(ROOT_DIR)/.env ## start tilt and build kind cluster if needed tilt up .PHONY: kind-reset diff --git a/hack/Makefile b/hack/Makefile new file mode 100644 index 000000000..489a4cb2d --- /dev/null +++ b/hack/Makefile @@ -0,0 +1,11 @@ +SHELL = /bin/bash + +V = 0 +Q = $(if $(filter 1,$V),,@) + +.PHONY: all +all: + $(Q) cd generator && make all + $(Q) cd generated && make all + + diff --git a/hack/generated/Dockerfile b/hack/generated/Dockerfile new file mode 100644 index 000000000..818a41624 --- /dev/null +++ b/hack/generated/Dockerfile @@ -0,0 +1,36 @@ +# Build the manager binary +FROM golang:1.13.15 as builder + +WORKDIR /workspace + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +# COPY . ./ +COPY main.go main.go +COPY apis/ apis/ +COPY controllers/ controllers/ +COPY pkg/ pkg/ + +# Build +# TODO: Use Makefile here -- right now it's awkward to do so because: +# 1. 
tools.mk is required for the makefile from the above directory, but Dockerfile can only look in its directory and below. +# 2. Having Dockerfile here but building it from above could work except that there's another Dockerfile and a .dockerignore +# up above that break things. For now we just build by hand +# RUN make build + +# TODO: Do we want CGO_ENABLED=0 and the other options below in the makefile? +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o k8sinfra-controller main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/k8sinfra-controller . +USER nonroot:nonroot +ENTRYPOINT ["/k8sinfra-controller"] diff --git a/hack/generated/Makefile b/hack/generated/Makefile index 7478da680..2907f3863 100644 --- a/hack/generated/Makefile +++ b/hack/generated/Makefile @@ -2,9 +2,8 @@ SHELL = /bin/bash PACKAGE = github.com/Azure/k8s-infra/hack/generated APP = k8sinfra-controller -timestamp := $(shell /bin/date "+%Y%m%d-%H%M%S") -# CONFIG_REGISTRY = kind-registry:5000/fake/k8s-infra-controller:latest -IMG ?= k8s-infra-generated-contoller:$(timestamp) +CONFIG_REGISTRY = kind-registry:5000/fake/k8s-infra-controller:latest +IMG ?= k8s-infra-generated-contoller:latest KIND_CLUSTER_NAME = k8sinfra-generated include ../../tools.mk @@ -13,18 +12,23 @@ CRD_OPTIONS ?= "crd:crdVersions=v1,allowDangerousTypes=true" GO_DIRS := $(shell $(GO) list -f '{{.Dir}}' ./...) # We exclude the apis folder because it's really large and test discovery takes a good amount of time (>10s) GO_DIRS_TO_TEST := $(shell $(GO) list -f '{{.Dir}}' ./... | grep -v /apis/) +CONTROLLER_DEBUG_LOGLEVEL := 4 V = 0 Q = $(if $(filter 1,$V),,@) .PHONY: all -all: generate header-check fmt build test +all: generate header-check fmt build test test-int # TODO: Do we want to remove test-int from the all target so it's not run every time locally? # There is a ci specific target because we want the CI pass to fail if # the code has not been go fmt-ed, whereas locally we want "make all" # to just format the code for you .PHONY: ci -ci: generate build test-cover +ci: generate build # test-cover or test-cover-int will be called by the CI job directly + +## -------------------------------------- +## Build +## -------------------------------------- .PHONY: lint lint: $(GOLANGCI_LINT) ; $(info $(M) running golangci configured linters…) ## Lint codebase @@ -42,24 +46,13 @@ fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files tidy: ; $(info $(M) running tidy…) @ ## Run tidy $Q $(GO) mod tidy -.PHONY: test -test: ; $(info $(M) running go test…) - $(Q) $(GO) test $(GO_DIRS_TO_TEST) -tags=noexit - -.PHONY: test-cover -test-cover: $(GCOV2LCOV) ; $(info $(M) running go test…) - # NOTE: if you update the 'test-cover' target, also update ./github/workflows/test.yml - # for the Windows part of the "test-generator" job. - $(Q) $(GO) test -tags=noexit -race -covermode atomic -coverprofile=cover.out -coverpkg=./...
$(GO_DIRS_TO_TEST) - $(Q) $(GCOV2LCOV) -infile cover.out -outfile coverage.lcov - .PHONY: generate generate: $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Generate code # Force regeneration of all of the conversions - @echo "Deleting old controller gen files" + @echo "Deleting old deepcopy files" $(Q) find "./apis" -type f -name "zz_generated.*" -delete - @echo "Executing controller-gen" + @echo "Executing controller-gen to generate deepcopy functions" $(Q) $(CONTROLLER_GEN) object:headerFile=../boilerplate.go.txt paths="./..." # @echo "Executing conversion-gen" @@ -70,15 +63,64 @@ generate: $(CONTROLLER_GEN) $(CONVERSION_GEN) ## Generate code # --go-header-file=../boilerplate.go.txt # Force regeneration of all of the CRDs - @echo "Deleting old CRDs" + @echo "Deleting old CRD YAMLs" $(Q) if [ -d "./config/crd/bases" ]; then find "./config/crd/bases" -type f -name "*" -delete; fi - @echo "Executing controller-gen to generate CRDs" - $(Q) $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./apis/..." output:crd:artifacts:config=config/crd/bases + @echo "Executing controller-gen to generate CRD and RBAC YAMLs" + $(Q) $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases .PHONY: build build: tidy lint ; $(info $(M) building ./bin/$(APP)) - $(Q) $(GO) build -o ./bin/$(APP) + $(Q) CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on $(GO) build -o ./bin/$(APP) + +## -------------------------------------- +## Test +## -------------------------------------- +.PHONY: test +test: ; $(info $(M) running go test…) + $(Q) $(GO) test $(GO_DIRS_TO_TEST) -short -tags=noexit + +.PHONY: test-cover +test-cover: $(GCOV2LCOV) ; $(info $(M) running go test…) + # NOTE: if you update the 'test-cover' target, also update ./github/workflows/test.yml + # for the Windows part of the "test-generator" job. + $(Q) $(GO) test -short -tags=noexit -race -covermode atomic -coverprofile=cover.out -coverpkg=./... $(GO_DIRS_TO_TEST) + $(Q) $(GCOV2LCOV) -infile cover.out -outfile coverage.lcov + +# Initially this target uses kind as there is no need to end to end test on Azure. +# Eventually when we start supporting features like MSI that only work in Azure, +# it's likely we'll need to make the integration tests (or at least some of them) +# run in an AKS cluster. +.PHONY: test-int-no-cleanup +test-int-no-cleanup: ; $(info $(M) running controller integration test…) + # MUST be executed as single command, or env vars will not propagate to test execution + $(Q) $(GO) test $(GO_DIRS_TO_TEST) # TODO: better way to just run integration tests? + +.PHONY: test-int-no-cleanup-cover +test-int-no-cleanup-cover: $(GCOV2LCOV) ; $(info $(M) running controller integration test…) + # MUST be executed as single command, or env vars will not propagate to test execution + $(Q) $(GO) test -race -covermode atomic -coverprofile=cover.out -coverpkg=./... 
$(GO_DIRS_TO_TEST) + $(Q) $(GCOV2LCOV) -infile cover.out -outfile coverage.lcov + +.PHONY: test-int +test-int: kind-create deploy test-int-no-cleanup kind-delete cleanup-test-azure-resources + +.PHONY: test-int-cover +test-int-cover: kind-create deploy test-int-no-cleanup-cover kind-delete cleanup-test-azure-resources + +# Cleanup resource groups created by tests -- this isn't strictly required as the tests +# clean up after themselves, but doing it here anyway just to be doubly sure we don't leak +# resources in cases where the test pass is terminated, panics, etc +# This finds all resource groups which match the specified pattern (k8sinfratest) and are older than a day +# (86400 seconds). This is a bit horrible but it works... +.PHONY: cleanup-test-azure-resources +cleanup-test-azure-resources: + $(Q) rgs=`az group list --query "[*].{Name: name, CreatedAt: tags.CreatedAt}" \ + | jq -r '.[] | select(.Name | test("^k8sinfratest")) | select(.CreatedAt == null or now-(.CreatedAt | fromdate) > 86400) | .Name'`; \ + for rgname in $${rgs[@]} ; do \ + echo "$$rgname will be deleted"; \ + az group delete --name $$rgname --no-wait --yes; \ + done ## -------------------------------------- ## Development @@ -90,16 +132,7 @@ kind-delete: $(KIND) ## Destroys the "k8sinfra" kind cluster. .PHONY: kind-create kind-create: - $(Q) $(KIND) get clusters | grep -E $(KIND_CLUSTER_NAME) > /dev/null;\ - EXISTS=$$?;\ - if [ $$EXISTS -eq 0 ]; then \ - echo "$(KIND_CLUSTER_NAME) already exists"; \ - else \ - $(KIND) create cluster --name=$(KIND_CLUSTER_NAME); \ - fi; \ - - # TODO: Need to use this script when we actually start installing stuff from a registry - # $(SCRIPTS_DIR)/kind-with-registry.sh + export KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) && $(SCRIPTS_DIR)/kind-with-registry.sh # TODO: We may want this later #.PHONY: apply-certs-and-secrets @@ -109,7 +142,7 @@ kind-create: .PHONY: run run: export ENVIRONMENT = development run: kind-create install ## Run a development cluster using kind - $(GO) run ./main.go -v 4 + $(Q) $(GO) run ./main.go -v $(CONTROLLER_DEBUG_LOGLEVEL) ## -------------------------------------- ## Deploy @@ -125,7 +158,9 @@ uninstall: generate $(KUBECTL) $(KUSTOMIZE) ## Uninstall CRDs from a cluster .PHONY: deploy deploy: generate $(KUBECTL) $(KUSTOMIZE) docker-build docker-push ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config + # TODO: Consider patching in CONTROLLER_DEBUG_LOGLEVEL? $(KUSTOMIZE) build config/default | sed "s_${CONFIG_REGISTRY}_${REGISTRY}/${IMG}_" | $(KUBECTL) apply -f - + $(SCRIPTS_DIR)/deploy_testing_secret.sh .PHONY: docker-build docker-build: ## Build the docker image diff --git a/hack/generated/config/default/kustomization.yaml b/hack/generated/config/default/kustomization.yaml index 0ee8b0858..04d0b5614 100644 --- a/hack/generated/config/default/kustomization.yaml +++ b/hack/generated/config/default/kustomization.yaml @@ -6,7 +6,7 @@ namespace: k8s-infra-system # "wordpress" becomes "alices-wordpress". # Note that it should also match with the prefix (text before '-') of the namespace # field above. -# namePrefix: k8s-infra- +namePrefix: k8s-infra- # Labels to add to all resources and selectors. 
#commonLabels: @@ -14,8 +14,8 @@ namespace: k8s-infra-system bases: - ../crd -# - ../rbac -# - ../manager +- ../rbac +- ../manager # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml # - ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. diff --git a/hack/generated/config/manager/kustomization.yaml b/hack/generated/config/manager/kustomization.yaml new file mode 100644 index 000000000..4cc024f62 --- /dev/null +++ b/hack/generated/config/manager/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- manager.yaml + +patchesStrategicMerge: + - manager_auth_proxy_patch.yaml + - manager_image_patch.yaml + - manager_pull_policy.yaml diff --git a/hack/generated/config/manager/manager.yaml b/hack/generated/config/manager/manager.yaml new file mode 100644 index 000000000..9aab520ac --- /dev/null +++ b/hack/generated/config/manager/manager.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - # command: + # - /manager + args: + - --enable-leader-election + image: controller:latest + name: manager + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 200m + memory: 256Mi + terminationGracePeriodSeconds: 10 diff --git a/hack/generated/config/manager/manager_auth_proxy_patch.yaml b/hack/generated/config/manager/manager_auth_proxy_patch.yaml new file mode 100644 index 000000000..61cb5e7cb --- /dev/null +++ b/hack/generated/config/manager/manager_auth_proxy_patch.yaml @@ -0,0 +1,25 @@ +# This patch injects a sidecar container which is an HTTP proxy for the controller manager; +# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: manager + args: + - "--metrics-addr=127.0.0.1:8080" + - "--enable-leader-election" diff --git a/hack/generated/config/manager/manager_image_patch.yaml b/hack/generated/config/manager/manager_image_patch.yaml new file mode 100644 index 000000000..5fc26105c --- /dev/null +++ b/hack/generated/config/manager/manager_image_patch.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + # Change the value of image field below to your controller image URL + - image: kind-registry:5000/fake/k8s-infra-controller:latest + name: manager + env: + - name: AZURE_CLIENT_ID + valueFrom: + secretKeyRef: + name: k8sinfra-controller-settings + key: AZURE_CLIENT_ID + - name: AZURE_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: k8sinfra-controller-settings + key: AZURE_CLIENT_SECRET + - name: AZURE_TENANT_ID + valueFrom: + secretKeyRef: + name: k8sinfra-controller-settings + key: AZURE_TENANT_ID + - name: AZURE_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + name: k8sinfra-controller-settings + key: AZURE_SUBSCRIPTION_ID diff --git a/hack/generated/config/manager/manager_pull_policy.yaml b/hack/generated/config/manager/manager_pull_policy.yaml new file mode 100644 index 000000000..db7843a90 --- /dev/null +++ b/hack/generated/config/manager/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/hack/generated/config/rbac/auth_proxy_role.yaml b/hack/generated/config/rbac/auth_proxy_role.yaml new file mode 100644 index 000000000..618f5e417 --- /dev/null +++ b/hack/generated/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/hack/generated/config/rbac/auth_proxy_role_binding.yaml b/hack/generated/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 000000000..48ed1e4b8 --- /dev/null +++ b/hack/generated/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/hack/generated/config/rbac/auth_proxy_service.yaml b/hack/generated/config/rbac/auth_proxy_service.yaml new file mode 100644 index 000000000..6cf656be1 --- /dev/null +++ b/hack/generated/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff 
--git a/hack/generated/config/rbac/kustomization.yaml b/hack/generated/config/rbac/kustomization.yaml new file mode 100644 index 000000000..817f1fe61 --- /dev/null +++ b/hack/generated/config/rbac/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 3 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml diff --git a/hack/generated/config/rbac/leader_election_role.yaml b/hack/generated/config/rbac/leader_election_role.yaml new file mode 100644 index 000000000..eaa79158f --- /dev/null +++ b/hack/generated/config/rbac/leader_election_role.yaml @@ -0,0 +1,32 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create diff --git a/hack/generated/config/rbac/leader_election_role_binding.yaml b/hack/generated/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 000000000..eed16906f --- /dev/null +++ b/hack/generated/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/hack/generated/config/rbac/resourcegroup_editor_role.yaml b/hack/generated/config/rbac/resourcegroup_editor_role.yaml new file mode 100644 index 000000000..b73c5fe24 --- /dev/null +++ b/hack/generated/config/rbac/resourcegroup_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit resourcegroups. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: resourcegroup-editor-role +rules: +- apiGroups: + - microsoft.resources.infra.azure.com + resources: + - resourcegroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - microsoft.resources.infra.azure.com + resources: + - resourcegroups/status + verbs: + - get diff --git a/hack/generated/config/rbac/resourcegroup_viewer_role.yaml b/hack/generated/config/rbac/resourcegroup_viewer_role.yaml new file mode 100644 index 000000000..85fe0cd50 --- /dev/null +++ b/hack/generated/config/rbac/resourcegroup_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view resourcegroups. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: resourcegroup-viewer-role +rules: +- apiGroups: + - microsoft.resources.infra.azure.com + resources: + - resourcegroups + verbs: + - get + - list + - watch +- apiGroups: + - microsoft.resources.infra.azure.com + resources: + - resourcegroups/status + verbs: + - get diff --git a/hack/generated/config/rbac/role.yaml b/hack/generated/config/rbac/role.yaml new file mode 100644 index 000000000..0143c0360 --- /dev/null +++ b/hack/generated/config/rbac/role.yaml @@ -0,0 +1,78 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - microsoft.batch.infra.azure.com + resources: + - batchaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - microsoft.batch.infra.azure.com + resources: + - batchaccounts/status + verbs: + - get + - patch + - update +- apiGroups: + - microsoft.resources.infra.azure.com + resources: + - resourcegroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - microsoft.resources.infra.azure.com + resources: + - resourcegroups/status + verbs: + - get + - patch + - update +- apiGroups: + - microsoft.storage.infra.azure.com + resources: + - storageaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - microsoft.storage.infra.azure.com + resources: + - storageaccounts/status + verbs: + - get + - patch + - update diff --git a/hack/generated/config/rbac/role_binding.yaml b/hack/generated/config/rbac/role_binding.yaml new file mode 100644 index 000000000..8f2658702 --- /dev/null +++ b/hack/generated/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/hack/generated/controllers/controller_test.go b/hack/generated/controllers/controller_test.go new file mode 100644 index 000000000..fbd76ecc3 --- /dev/null +++ b/hack/generated/controllers/controller_test.go @@ -0,0 +1,100 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package controllers_test + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + + storage "github.com/Azure/k8s-infra/hack/generated/apis/microsoft.storage/v20190401" + "github.com/Azure/k8s-infra/hack/generated/pkg/armclient" +) + +func Test_ResourceGroup_CRUD(t *testing.T) { + g := NewGomegaWithT(t) + ctx := context.Background() + + // Create a resource group + rg := testContext.NewTestResourceGroup() + err := testContext.KubeClient.Create(ctx, rg) + g.Expect(err).ToNot(HaveOccurred()) + + // It should be created in Kubernetes + g.Eventually(rg).Should(testContext.MatcherMaker.BeProvisioned(ctx)) + + g.Expect(rg.Status.Location).To(Equal(testContext.AzureRegion)) + g.Expect(rg.Status.Properties.ProvisioningState).To(Equal(string(armclient.SucceededProvisioningState))) + g.Expect(rg.Status.ID).ToNot(BeNil()) + armId := rg.Status.ID + + // Delete the resource group + err = testContext.KubeClient.Delete(ctx, rg) + g.Expect(err).ToNot(HaveOccurred()) + g.Eventually(rg).Should(testContext.MatcherMaker.BeDeleted(ctx)) + + // Ensure that the resource group was really deleted in Azure + // TODO: Do we want to just use an SDK here? This process is quite icky as is... + exists, err := testContext.AzureClient.HeadResource( + ctx, + armId, + "2020-06-01") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(exists).To(BeFalse()) +} + +func Test_StorageAccount_CRUD(t *testing.T) { + g := NewGomegaWithT(t) + ctx := context.Background() + + // Custom namer because storage accounts have strict names + namer := testContext.Namer.WithSeparator("") + + // Create a storage account + accessTier := storage.StorageAccountPropertiesCreateParametersAccessTierHot + acct := &storage.StorageAccount{ + ObjectMeta: testContext.MakeObjectMetaWithName(namer.GenerateName("stor")), + Spec: storage.StorageAccounts_Spec{ + Location: testContext.AzureRegion, + ApiVersion: "2019-04-01", // TODO: This should be removed from the storage type eventually + Owner: testContext.SharedResourceGroupOwner(), + Kind: storage.StorageAccountsSpecKindBlobStorage, + Sku: storage.Sku{ + Name: storage.SkuNameStandardLRS, + }, + // TODO: They mark this property as optional but actually it is required + Properties: &storage.StorageAccountPropertiesCreateParameters{ + AccessTier: &accessTier, + }, + }, + } + err := testContext.KubeClient.Create(ctx, acct) + g.Expect(err).ToNot(HaveOccurred()) + + // It should be created in Kubernetes + g.Eventually(acct).Should(testContext.MatcherMaker.BeProvisioned(ctx)) + + g.Expect(acct.Status.Location).To(Equal(testContext.AzureRegion)) + expectedKind := storage.StorageAccountStatusKindBlobStorage + g.Expect(acct.Status.Kind).To(Equal(&expectedKind)) + g.Expect(acct.Status.Id).ToNot(BeNil()) + armId := *acct.Status.Id + + // Delete the storage account + err = testContext.KubeClient.Delete(ctx, acct) + g.Expect(err).ToNot(HaveOccurred()) + g.Eventually(acct).Should(testContext.MatcherMaker.BeDeleted(ctx)) + + // Ensure that the resource group was really deleted in Azure + // TODO: Do we want to just use an SDK here? This process is quite icky as is... 
+ exists, err := testContext.AzureClient.HeadResource( + ctx, + armId, + "2019-04-01") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(exists).To(BeFalse()) +} diff --git a/hack/generated/controllers/generic_controller.go b/hack/generated/controllers/generic_controller.go index 378a630b2..f13060fa3 100644 --- a/hack/generated/controllers/generic_controller.go +++ b/hack/generated/controllers/generic_controller.go @@ -40,6 +40,15 @@ const ( GenericControllerFinalizer = "generated.infra.azure.com/finalizer" ) +// TODO: We need to generate this +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups=microsoft.resources.infra.azure.com,resources=resourcegroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=microsoft.resources.infra.azure.com,resources=resourcegroups/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=microsoft.storage.infra.azure.com,resources=storageaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=microsoft.storage.infra.azure.com,resources=storageaccounts/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=microsoft.batch.infra.azure.com,resources=batchaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=microsoft.batch.infra.azure.com,resources=batchaccounts/status,verbs=get;update;patch + // GenericReconciler reconciles resources type GenericReconciler struct { Log logr.Logger diff --git a/hack/generated/controllers/suite_test.go b/hack/generated/controllers/suite_test.go new file mode 100644 index 000000000..a4c30982c --- /dev/null +++ b/hack/generated/controllers/suite_test.go @@ -0,0 +1,144 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. 
+*/ + +package controllers_test + +import ( + "context" + "log" + "os" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + resources "github.com/Azure/k8s-infra/hack/generated/apis/microsoft.resources/v20200601" + "github.com/Azure/k8s-infra/hack/generated/controllers" + "github.com/Azure/k8s-infra/hack/generated/pkg/genruntime" + "github.com/Azure/k8s-infra/hack/generated/pkg/testcommon" +) + +const ( + TestRegion = "westus" + TestNamespace = "k8s-infra-test-ns" + DefaultResourceTimeout = 2 * time.Minute +) + +var testContext ControllerTestContext + +type ControllerTestContext struct { + *testcommon.TestContext + SharedResourceGroup *resources.ResourceGroup +} + +func (tc *ControllerTestContext) SharedResourceGroupOwner() genruntime.KnownResourceReference { + return genruntime.KnownResourceReference{Name: tc.SharedResourceGroup.Name} +} + +func setup() error { + ctx := context.Background() + log.Println("Running test setup") + + gomega.SetDefaultEventuallyTimeout(DefaultResourceTimeout) + gomega.SetDefaultEventuallyPollingInterval(5 * time.Second) + + newCtx, err := testcommon.NewTestContext( + TestRegion, + TestNamespace, + controllers.ResourceStateAnnotation, + controllers.ResourceErrorAnnotation) + + if err != nil { + return err + } + + err = newCtx.CreateTestNamespace() + if err != nil { + return err + } + + // Create a shared resource group, for tests to use + sharedResourceGroup := newCtx.NewTestResourceGroup() + err = newCtx.KubeClient.Create(ctx, sharedResourceGroup) + if err != nil { + return errors.Wrapf(err, "creating shared resource group") + } + + // TODO: Should use AzureName rather than Name once it's always set + log.Printf("Created shared resource group %s\n", sharedResourceGroup.Name) + + // It should be created in Kubernetes + err = testcommon.WaitFor(ctx, DefaultResourceTimeout, func(waitCtx context.Context) (bool, error) { + return newCtx.Ensure.Provisioned(waitCtx, sharedResourceGroup) + }) + if err != nil { + return errors.Wrapf(err, "waiting for shared resource group") + } + + log.Println("Done with test setup") + + testContext = ControllerTestContext{newCtx, sharedResourceGroup} + + return nil +} + +func teardown() error { + log.Println("Started common controller test teardown") + + ctx := context.Background() + + // List all of the resource groups + rgList := &resources.ResourceGroupList{} + err := testContext.KubeClient.List(ctx, rgList, &client.ListOptions{Namespace: testContext.Namespace}) + if err != nil { + return errors.Wrap(err, "listing resource groups") + } + + // Delete any leaked resource groups + var errs []error + + var resourceGroups []runtime.Object + + for _, rg := range rgList.Items { + rg := rg // Copy so that we can safely take addr + resourceGroups = append(resourceGroups, &rg) + err := testContext.KubeClient.Delete(ctx, &rg) + if err != nil { + errs = append(errs, err) + } + } + err = kerrors.NewAggregate(errs) + if err != nil { + return err + } + + // Don't block forever waiting for delete to complete + err = testcommon.WaitFor(ctx, DefaultResourceTimeout, func(waitCtx context.Context) (bool, error) { + return testContext.Ensure.AllDeleted(waitCtx, resourceGroups) + }) + if err != nil { + return errors.Wrapf(err, "waiting for all resource groups to delete") + } + + log.Println("Finished common controller test teardown") + return nil +} + +func TestMain(m *testing.M) { + 
os.Exit(testcommon.SetupTeardownTestMain(m, true, setup, teardown)) +} + +// TODO: Do we need this? +//func PanicRecover(t *testing.T) { +// if err := recover(); err != nil { +// t.Logf("caught panic in test: %v", err) +// t.Logf("stacktrace from panic: \n%s", string(debug.Stack())) +// t.Fail() +// } +//} diff --git a/hack/generated/pkg/testcommon/be_deleted_matcher.go b/hack/generated/pkg/testcommon/be_deleted_matcher.go new file mode 100644 index 000000000..987af04c5 --- /dev/null +++ b/hack/generated/pkg/testcommon/be_deleted_matcher.go @@ -0,0 +1,91 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package testcommon + +import ( + "context" + + gomegaformat "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func actualAsObj(actual interface{}) (controllerutil.Object, error) { + obj, ok := actual.(controllerutil.Object) + if !ok { + return nil, errors.Errorf("expected controllerutil.Object, was: %T", actual) + } + + return obj, nil +} + +type BeDeletedMatcher struct { + ensure *Ensure + ctx context.Context + + subsequentMissingDeleteTimestamps int +} + +var _ types.GomegaMatcher = &BeDeletedMatcher{} + +func (m *BeDeletedMatcher) Match(actual interface{}) (bool, error) { + + if actual == nil { + return false, nil + } + + obj, err := actualAsObj(actual) + if err != nil { + return false, err + } + + return m.ensure.Deleted(m.ctx, obj) +} + +func (m *BeDeletedMatcher) FailureMessage(actual interface{}) string { + obj, err := actualAsObj(actual) + if err != nil { + // Gomega's contract is that it won't call one of the message functions + // if Match returned an error. If we make it here somehow, that contract + // has been violated + panic(err) + } + + return gomegaformat.Message(obj, "to be deleted") +} + +func (m *BeDeletedMatcher) NegatedFailureMessage(actual interface{}) string { + obj, err := actualAsObj(actual) + if err != nil { + // Gomega's contract is that it won't call one of the message functions + // if Match returned an error. If we make it here somehow, that contract + // has been violated + panic(err) + } + + return gomegaformat.Message(obj, "not to be deleted") +} + +// MatchMayChangeInTheFuture implements OracleMatcher which of course isn't exported so we can't type-assert we implement it +func (m *BeDeletedMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if actual == nil { + return false + } + + obj, err := actualAsObj(actual) + if err != nil { + panic(err) + } + + // Initial object may not have a deletion timestamp set yet so look instead + // for subsequent calls that all don't have a timestamp + deletionTimestamp := obj.GetDeletionTimestamp() + if deletionTimestamp == nil { + m.subsequentMissingDeleteTimestamps++ + } + return m.subsequentMissingDeleteTimestamps <= 1 +} diff --git a/hack/generated/pkg/testcommon/be_provisioned_matcher.go b/hack/generated/pkg/testcommon/be_provisioned_matcher.go new file mode 100644 index 000000000..1a801ddd2 --- /dev/null +++ b/hack/generated/pkg/testcommon/be_provisioned_matcher.go @@ -0,0 +1,83 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license.
+*/ + +package testcommon + +import ( + "context" + "fmt" + + gomegaformat "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + + "github.com/Azure/k8s-infra/hack/generated/pkg/armclient" +) + +type BeProvisionedMatcher struct { + ensure *Ensure + ctx context.Context +} + +var _ types.GomegaMatcher = &BeProvisionedMatcher{} + +func (m *BeProvisionedMatcher) Match(actual interface{}) (bool, error) { + + if actual == nil { + return false, nil + } + + obj, err := actualAsObj(actual) + if err != nil { + return false, err + } + + return m.ensure.Provisioned(m.ctx, obj) +} + +func (m *BeProvisionedMatcher) FailureMessage(actual interface{}) string { + obj, err := actualAsObj(actual) + if err != nil { + // Gomegas contract is that it won't call one of the message functions + // if Match returned an error. If we make it here somehow that contract + // has been violated + panic(err) + } + + state := obj.GetAnnotations()[m.ensure.stateAnnotation] + errorString := obj.GetAnnotations()[m.ensure.errorAnnotation] + + return gomegaformat.Message( + state, + fmt.Sprintf("state to be %s. Error is: %s", string(armclient.SucceededProvisioningState), errorString)) +} + +func (m *BeProvisionedMatcher) NegatedFailureMessage(actual interface{}) string { + obj, err := actualAsObj(actual) + if err != nil { + // Gomegas contract is that it won't call one of the message functions + // if Match returned an error. If we make it here somehow that contract + // has been violated + panic(err) + } + + state := obj.GetAnnotations()[m.ensure.stateAnnotation] + + return gomegaformat.Message(state, fmt.Sprintf("state not to be %s", string(armclient.SucceededProvisioningState))) +} + +// MatchMayChangeInTheFuture implements OracleMatcher which of course isn't exported so we can't type-assert we implement it +func (m *BeProvisionedMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if actual == nil { + return false + } + + obj, err := actualAsObj(actual) + if err != nil { + panic(err) + } + state := obj.GetAnnotations()[m.ensure.stateAnnotation] + + return !armclient.IsTerminalProvisioningState(armclient.ProvisioningState(state)) +} diff --git a/hack/generated/pkg/testcommon/ensure.go b/hack/generated/pkg/testcommon/ensure.go new file mode 100644 index 000000000..8d3ae8ffb --- /dev/null +++ b/hack/generated/pkg/testcommon/ensure.go @@ -0,0 +1,90 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package testcommon + +import ( + "context" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/Azure/k8s-infra/hack/generated/pkg/armclient" +) + +type Ensure struct { + kubeClient client.Client + stateAnnotation string + errorAnnotation string +} + +func NewEnsure(c client.Client, stateAnnotation string, errorAnnotation string) *Ensure { + return &Ensure{ + kubeClient: c, + stateAnnotation: stateAnnotation, + errorAnnotation: errorAnnotation, + } +} + +// Provisioned checks to ensure the provisioning state of the resource is successful. +func (e *Ensure) Provisioned(ctx context.Context, obj runtime.Object) (bool, error) { + key, err := client.ObjectKeyFromObject(obj) + if err != nil { + return false, err + } + + err = e.kubeClient.Get(ctx, key, obj) + if err != nil { + return false, err + } + + // Have to cast because return of kubeClient.Get is not a metav1.Object unfortunately. 
+ metaObj, ok := obj.(metav1.Object) + if !ok { + return false, errors.Errorf("result of get was not metav1.Object, was: %T", obj) + } + + state := metaObj.GetAnnotations()[e.stateAnnotation] + return state == string(armclient.SucceededProvisioningState), nil +} + +// Deleted ensures that the object specified has been deleted +func (e *Ensure) Deleted(ctx context.Context, obj runtime.Object) (bool, error) { + key, err := client.ObjectKeyFromObject(obj) + if err != nil { + return false, err + } + + // Note that obj won't be modified if it's already deleted, so + // could be "stale" after this call + err = e.kubeClient.Get(ctx, key, obj) + if apierrors.IsNotFound(err) { + return true, nil + } + if err != nil { + return false, err + } + + return false, nil +} + +// AllDeleted ensures that all of the specified objects are deleted +func (e *Ensure) AllDeleted(ctx context.Context, objs []runtime.Object) (bool, error) { + for _, obj := range objs { + // It's possible that this is horribly inefficient. Should be good enough for now though + deleted, err := e.Deleted(ctx, obj) + if err != nil { + return false, err + } + if !deleted { + return false, nil + } + } + + return true, nil +} diff --git a/hack/generated/pkg/testcommon/matcher_maker.go b/hack/generated/pkg/testcommon/matcher_maker.go new file mode 100644 index 000000000..4043414c5 --- /dev/null +++ b/hack/generated/pkg/testcommon/matcher_maker.go @@ -0,0 +1,37 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package testcommon + +import ( + "context" + + "github.com/onsi/gomega/types" +) + +// TODO: Would we rather these just be on testcontext? Might read better +type MatcherMaker struct { + ensure *Ensure +} + +func NewMatcherMaker(ensure *Ensure) *MatcherMaker { + return &MatcherMaker{ + ensure: ensure, + } +} + +func (m *MatcherMaker) BeProvisioned(ctx context.Context) types.GomegaMatcher { + return &BeProvisionedMatcher{ + ensure: m.ensure, + ctx: ctx, + } +} + +func (m *MatcherMaker) BeDeleted(ctx context.Context) types.GomegaMatcher { + return &BeDeletedMatcher{ + ensure: m.ensure, + ctx: ctx, + } +} diff --git a/hack/generated/pkg/testcommon/resource_namer.go b/hack/generated/pkg/testcommon/resource_namer.go new file mode 100644 index 000000000..e7c0c7cb8 --- /dev/null +++ b/hack/generated/pkg/testcommon/resource_namer.go @@ -0,0 +1,54 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. 
+*/ + +package testcommon + +import ( + "math/rand" + "strings" + "time" +) + +type ResourceNamer struct { + rand *rand.Rand + runes []rune + prefix string + randomChars int + separator string +} + +func NewResourceNamer(prefix string, separator string, randomChars int) *ResourceNamer { + return &ResourceNamer{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + runes: []rune("abcdefghijklmnopqrstuvwxyz"), + prefix: prefix, + randomChars: randomChars, + separator: separator, + } +} + +func (n *ResourceNamer) WithSeparator(separator string) *ResourceNamer { + return NewResourceNamer(n.prefix, separator, n.randomChars) +} + +func (n *ResourceNamer) generateName(prefix string, num int) string { + result := make([]rune, num) + for i := 0; i < num; i++ { + result[i] = n.runes[n.rand.Intn(len(n.runes))] + } + + var s []string + if prefix != "" { + s = []string{n.prefix, prefix, string(result)} + } else { + s = []string{n.prefix, string(result)} + } + + return strings.Join(s, n.separator) +} + +func (n *ResourceNamer) GenerateName(prefix string) string { + return n.generateName(prefix, n.randomChars) +} diff --git a/hack/generated/pkg/testcommon/test_context.go b/hack/generated/pkg/testcommon/test_context.go new file mode 100644 index 000000000..376f76d44 --- /dev/null +++ b/hack/generated/pkg/testcommon/test_context.go @@ -0,0 +1,133 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package testcommon + +import ( + "context" + "os" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + resources "github.com/Azure/k8s-infra/hack/generated/apis/microsoft.resources/v20200601" + storage "github.com/Azure/k8s-infra/hack/generated/apis/microsoft.storage/v20190401" + "github.com/Azure/k8s-infra/hack/generated/pkg/armclient" +) + +// If you modify this make sure to modify the cleanup-test-azure-resources target in the Makefile too +const ResourcePrefix = "k8sinfratest" + +type TestContext struct { + Namer *ResourceNamer + KubeClient client.Client + Ensure *Ensure + MatcherMaker *MatcherMaker + AzureClient armclient.Applier + + AzureRegion string + Namespace string + AzureSubscription string +} + +// TODO: State Annotation parameter should be removed once the interface for Status determined and promoted +// TODO: to genruntime. Same for errorAnnotation +func NewTestContext(region string, namespace string, stateAnnotation string, errorAnnotation string) (*TestContext, error) { + scheme := CreateScheme() + config, err := ctrl.GetConfig() + if err != nil { + return nil, errors.Wrap(err, "unable to get kubeconfig") + } + + kubeClient, err := client.New(config, client.Options{Scheme: scheme}) + if err != nil { + return nil, errors.Wrapf(err, "creating kubeclient") + } + + armClient, err := armclient.NewAzureTemplateClient() + if err != nil { + return nil, errors.Wrapf(err, "creating armclient") + } + + subscription, ok := os.LookupEnv("AZURE_SUBSCRIPTION_ID") + if !ok { + return nil, errors.New("couldn't find AZURE_SUBSCRIPTION_ID") + } + + ensure := NewEnsure(kubeClient, stateAnnotation, errorAnnotation) + + return &TestContext{ + AzureClient: armClient, + AzureRegion: region, + AzureSubscription: subscription, // TODO: Do we really need this? 
+ Ensure: ensure, + MatcherMaker: NewMatcherMaker(ensure), + KubeClient: kubeClient, + Namer: NewResourceNamer(ResourcePrefix, "-", 6), + Namespace: namespace, + }, nil +} + +func (tc *TestContext) CreateTestNamespace() error { + ctx := context.Background() + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: tc.Namespace, + }, + } + _, err := controllerutil.CreateOrUpdate(ctx, tc.KubeClient, ns, func() error { + return nil + }) + if err != nil { + return errors.Wrapf(err, "creating namespace") + } + + return nil +} + +func (tc *TestContext) MakeObjectMeta(prefix string) ctrl.ObjectMeta { + return ctrl.ObjectMeta{ + Name: tc.Namer.GenerateName(prefix), + Namespace: tc.Namespace, + } +} + +func (tc *TestContext) MakeObjectMetaWithName(name string) ctrl.ObjectMeta { + return ctrl.ObjectMeta{ + Name: name, + Namespace: tc.Namespace, + } +} + +func (tc *TestContext) NewTestResourceGroup() *resources.ResourceGroup { + return &resources.ResourceGroup{ + ObjectMeta: tc.MakeObjectMeta("rg"), + Spec: resources.ResourceGroupSpec{ + Location: tc.AzureRegion, + // This tag is used for cleanup optimization + Tags: map[string]string{"CreatedAt": time.Now().UTC().Format(time.RFC3339)}, + }, + } +} + +// TODO: Code generate this (I think I already do in another branch) +func CreateScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + + _ = clientgoscheme.AddToScheme(scheme) + //_ = batch.AddToScheme(scheme) + _ = storage.AddToScheme(scheme) + _ = resources.AddToScheme(scheme) + + return scheme +} diff --git a/hack/generated/pkg/testcommon/wait.go b/hack/generated/pkg/testcommon/wait.go new file mode 100644 index 000000000..730d91fa4 --- /dev/null +++ b/hack/generated/pkg/testcommon/wait.go @@ -0,0 +1,68 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. 
+*/ + +package testcommon + +import ( + "context" + "flag" + "log" + "testing" + "time" +) + +// TODO: it's unfortunate we can't just use g.Eventually all the time +func WaitFor(ctx context.Context, timeout time.Duration, check func(context.Context) (bool, error)) error { + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + done := false + + for !done { + select { + case <-waitCtx.Done(): + return waitCtx.Err() + default: + var err error + done, err = check(waitCtx) + if err != nil { + return err + } + } + + time.Sleep(5 * time.Second) + } + + return nil +} + +// testMainWrapper is a wrapper that can be called by TestMain so that we can use defer +func SetupTeardownTestMain( + m *testing.M, + skipShortTests bool, + setup func() error, + teardown func() error) int { + + if skipShortTests { + flag.Parse() + if testing.Short() { + log.Println("Skipping slow tests in short mode") + return 0 + } + } + + setupErr := setup() + if setupErr != nil { + panic(setupErr) + } + + defer func() { + if teardownErr := teardown(); teardownErr != nil { + panic(teardownErr) + } + }() + + return m.Run() +} diff --git a/hack/generator/Makefile b/hack/generator/Makefile index f1d4d4cd0..3a30e3d47 100644 --- a/hack/generator/Makefile +++ b/hack/generator/Makefile @@ -14,13 +14,13 @@ V = 0 Q = $(if $(filter 1,$V),,@) .PHONY: all -all: header-check fmt test gen-arm make-generated-code +all: header-check fmt test gen-arm # There is a ci specific target because we want the CI pass to fail if # the code has not been go fmt-ed, whereas locally we want "make all" # to just format the code for you .PHONY: ci -ci: build test-cover gen-arm make-generated-code-ci +ci: build test-cover gen-arm .PHONY: gen-arm gen-arm: build ; $(info $(M) running k8sinfra-gen gen…) @ ## Running gen @@ -76,11 +76,3 @@ diagrams: $(patsubst %.dot,%.png,$(wildcard spec/images/*.dot)) %.png: %.dot $(info $(M) Generating diagram $@ from $< …) $(Q) dot $< -Tpng -o $@ - -.PHONY: make-generated-code -make-generated-code: - $(Q) cd ../generated && make all - -.PHONY: make-generated-code-ci -make-generated-code-ci: - $(Q) cd ../generated && make ci diff --git a/hack/generator/pkg/astmodel/arm_type.go b/hack/generator/pkg/astmodel/arm_type.go index 8c80f9649..16d75d823 100644 --- a/hack/generator/pkg/astmodel/arm_type.go +++ b/hack/generator/pkg/astmodel/arm_type.go @@ -18,9 +18,9 @@ type ArmType struct { // ArmType is a Type var _ Type = &ArmType{} -// MakeArmType wraps an object type to indicate it's an ARM targeted variation -func MakeArmType(object ObjectType) ArmType { - return ArmType{ +// NewArmType wraps an object type to indicate it's an ARM targeted variation +func NewArmType(object ObjectType) *ArmType { + return &ArmType{ objectType: object, } } @@ -31,22 +31,22 @@ func (at ArmType) RequiredPackageReferences() []PackageReference { } // References returns the names of all types that this type references -func (at ArmType) References() TypeNameSet { +func (at *ArmType) References() TypeNameSet { return at.objectType.References() } // AsType renders as a Go abstract syntax tree for a type -func (at ArmType) AsType(codeGenerationContext *CodeGenerationContext) ast.Expr { +func (at *ArmType) AsType(codeGenerationContext *CodeGenerationContext) ast.Expr { return at.objectType.AsType(codeGenerationContext) } // AsDeclarations renders as a Go abstract syntax tree for a declaration -func (at ArmType) AsDeclarations(codeGenerationContext *CodeGenerationContext, name TypeName, description []string) []ast.Decl { +func (at *ArmType) 
AsDeclarations(codeGenerationContext *CodeGenerationContext, name TypeName, description []string) []ast.Decl { return at.objectType.AsDeclarations(codeGenerationContext, name, description) } // Equals decides if the types are the same -func (at ArmType) Equals(t Type) bool { +func (at *ArmType) Equals(t Type) bool { if ost, ok := t.(*ArmType); ok { return TypeEquals(&at.objectType, ost) } @@ -55,22 +55,11 @@ func (at ArmType) Equals(t Type) bool { } // String returns a string representation of our ARM type -func (at ArmType) String() string { +func (at *ArmType) String() string { return fmt.Sprintf("ARM(%v)", at.objectType) } // ObjectType returns the underlying object type of the arm type -func (at ArmType) ObjectType() ObjectType { +func (at *ArmType) ObjectType() ObjectType { return at.objectType } - -// IsArmType returns true if the passed type is a Arm type; false otherwise. -func IsArmType(t Type) bool { - _, ok := t.(ArmType) - return ok -} - -// IsArmDefinition returns true if the passed definition is for a Arm type; false otherwise. -func IsArmDefinition(definition TypeDefinition) bool { - return IsArmType(definition.theType) -} diff --git a/hack/generator/pkg/astmodel/arm_type_test.go b/hack/generator/pkg/astmodel/arm_type_test.go index cb9945e50..c43112fde 100644 --- a/hack/generator/pkg/astmodel/arm_type_test.go +++ b/hack/generator/pkg/astmodel/arm_type_test.go @@ -4,75 +4,3 @@ */ package astmodel - -import ( - . "github.com/onsi/gomega" - - "testing" -) - -/* - * IsArmType() tests - */ - -func Test_IsArmType_GivenType_ReturnsExpectedResult(t *testing.T) { - distanceProperty := NewPropertyDefinition("Meters", "meters", IntType) - objectType := EmptyObjectType.WithProperty(distanceProperty) - armType := MakeArmType(ObjectType{}) - - cases := []struct { - name string - thisType Type - expected bool - }{ - {"String is not ARM type", StringType, false}, - {"Map is not ARM type", NewMapType(StringType, BoolType), false}, - {"Object is not ARM type", objectType, false}, - {"ARM is ARM type", armType, true}, - } - - for _, c := range cases { - c := c - t.Run(c.name, func(t *testing.T) { - t.Parallel() - g := NewGomegaWithT(t) - - isArm := IsArmType(c.thisType) - - g.Expect(isArm).To(Equal(c.expected)) - }) - } -} - -/* - * IsArmDefinition() tests - */ - -func Test_IsArmDefinition_GivenType_ReturnsExpectedResult(t *testing.T) { - distanceProperty := NewPropertyDefinition("Meters", "meters", IntType) - objectType := EmptyObjectType.WithProperty(distanceProperty) - stringDefinition := NewTestObject("Range", distanceProperty) - armType := MakeArmType(*objectType) - armDef := MakeTypeDefinition(stringDefinition.name, armType) - - cases := []struct { - name string - definition TypeDefinition - expected bool - }{ - {"String Definition is not ARM definition", stringDefinition, false}, - {"ARM Definition is an ARM definition", armDef, true}, - } - - for _, c := range cases { - c := c - t.Run(c.name, func(t *testing.T) { - t.Parallel() - g := NewGomegaWithT(t) - - isArm := IsArmDefinition(c.definition) - - g.Expect(isArm).To(Equal(c.expected)) - }) - } -} diff --git a/hack/generator/pkg/astmodel/armconversion/shared.go b/hack/generator/pkg/astmodel/armconversion/shared.go index 4fe93a3e8..ba069dc6a 100644 --- a/hack/generator/pkg/astmodel/armconversion/shared.go +++ b/hack/generator/pkg/astmodel/armconversion/shared.go @@ -88,17 +88,18 @@ func GetAzureNameProperty(idFactory astmodel.IdentifierFactory) *astmodel.Proper func getReceiverObjectType(codeGenerationContext 
*astmodel.CodeGenerationContext, receiver astmodel.TypeName) *astmodel.ObjectType { // Determine the type we're operating on - receiverType, err := codeGenerationContext.GetImportedDefinition(receiver) + rt, err := codeGenerationContext.GetImportedDefinition(receiver) if err != nil { panic(err) } - kubeType, ok := receiverType.Type().(*astmodel.ObjectType) + receiverType, ok := rt.Type().(*astmodel.ObjectType) if !ok { - panic(fmt.Sprintf("receiver for ArmConversionFunction is not of type ObjectType. TypeName: %v, Type %T", receiver, receiverType.Type())) + // Don't expect to have any wrapper types left at this point + panic(fmt.Sprintf("receiver for ArmConversionFunction is not of expected type. TypeName: %v, Type %T", receiver, rt.Type())) } - return kubeType + return receiverType } func generateTypeConversionAssignments( diff --git a/hack/generator/pkg/astmodel/enum_type_test.go b/hack/generator/pkg/astmodel/enum_type_test.go new file mode 100644 index 000000000..4dd8bd899 --- /dev/null +++ b/hack/generator/pkg/astmodel/enum_type_test.go @@ -0,0 +1,114 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT license. + */ + +package astmodel + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +var ( + aboveValue = EnumValue{ + Identifier: "Above", + Value: "Above", + } + + underValue = EnumValue{ + Identifier: "Under", + Value: "Under", + } + + leftValue = EnumValue{ + Identifier: "Left", + Value: "left", + } + + rightValue = EnumValue{ + Identifier: "Right", + Value: "Right", + } +) + +/* + * NewEnumType tests + */ + +func Test_NewEnumType_GivenValues_InitializesFields(t *testing.T) { + g := NewGomegaWithT(t) + + e := NewEnumType(StringType, []EnumValue{aboveValue, underValue}) + g.Expect(e.baseType).To(Equal(StringType)) + g.Expect(e.options).To(ContainElements(aboveValue, underValue)) +} + +/* + * BaseType() tests + */ + +func Test_EnumTypeBaseType_AfterConstruction_ReturnsExpectedType(t *testing.T) { + g := NewGomegaWithT(t) + + e := NewEnumType(StringType, []EnumValue{aboveValue, underValue}) + g.Expect(e.BaseType()).To(Equal(StringType)) +} + +/* + * Equals() tests + */ + +func Test_EnumTypeEquals_GivenEnums_ReturnsExpectedResult(t *testing.T) { + + enum := NewEnumType(StringType, []EnumValue{aboveValue, underValue}) + + other := NewEnumType(StringType, []EnumValue{aboveValue, underValue}) + differentBase := NewEnumType(IntType, []EnumValue{aboveValue, underValue}) + differentValueOrder := NewEnumType(StringType, []EnumValue{underValue, aboveValue}) + supersetOfValues := NewEnumType(StringType, []EnumValue{underValue, aboveValue, leftValue, rightValue}) + subsetOfValues := NewEnumType(StringType, []EnumValue{underValue}) + differentValues := NewEnumType(StringType, []EnumValue{leftValue, rightValue}) + + cases := []struct { + name string + thisEnum *EnumType + otherEnum *EnumType + expected bool + }{ + // Expect equal to self + {"Equal to self", enum, enum, true}, + {"Equal to self", other, other, true}, + // Expect equal to same + {"Equal to same", enum, other, true}, + {"Equal to same", other, enum, true}, + // Expect equal when values are reordered + {"Equal when values are reordered", enum, differentValueOrder, true}, + {"Equal when values reordered", differentValueOrder, enum, true}, + // Expect not-equal when values missing + {"Not-equal when values missing", enum, subsetOfValues, false}, + {"Not-equal when values missing", supersetOfValues, enum, false}, + // Expect not-equal when properties added + {"Not-equal when values added", enum, 
supersetOfValues, false}, + {"Not-equal when values added", supersetOfValues, enum, false}, + // Expect not-equal for different base type + {"Not-equal when different base type", enum, differentBase, false}, + {"Not-equal when different base type", differentBase, enum, false}, + // Expect not-equal for different values (but same value count) + {"Not-equal when different values", enum, differentValues, false}, + {"Not-equal when different values", differentValues, enum, false}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + g := NewGomegaWithT(t) + + areEqual := c.thisEnum.Equals(c.otherEnum) + + g.Expect(areEqual).To(Equal(c.expected)) + }) + } +} diff --git a/hack/generator/pkg/astmodel/file_definition.go b/hack/generator/pkg/astmodel/file_definition.go index 223fb9b22..e83088dc7 100644 --- a/hack/generator/pkg/astmodel/file_definition.go +++ b/hack/generator/pkg/astmodel/file_definition.go @@ -234,12 +234,14 @@ func (file *FileDefinition) AsAst() ast.Node { "Licensed under the MIT license.", CodeGenerationComment) + packageName := file.packageReference.PackageName() + // We set Package (the offset of the package keyword) so that it follows the header comments result := &ast.File{ Doc: &ast.CommentGroup{ List: header, }, - Name: ast.NewIdent(file.packageReference.PackageName()), + Name: ast.NewIdent(packageName), Decls: decls, Package: token.Pos(headerLen), } diff --git a/hack/generator/pkg/astmodel/local_package_reference.go b/hack/generator/pkg/astmodel/local_package_reference.go index 8b23b4264..d473ea9fd 100644 --- a/hack/generator/pkg/astmodel/local_package_reference.go +++ b/hack/generator/pkg/astmodel/local_package_reference.go @@ -53,7 +53,11 @@ func (pr LocalPackageReference) PackagePath() string { // Equals returns true if the passed package reference references the same package, false otherwise func (pr LocalPackageReference) Equals(ref PackageReference) bool { - if other, ok := ref.(LocalPackageReference); ok { + if ref == nil { + return false + } + + if other, ok := ref.AsLocalPackage(); ok { return pr.version == other.version && pr.group == other.group } diff --git a/hack/generator/pkg/astmodel/object_type.go b/hack/generator/pkg/astmodel/object_type.go index 57872c7ba..3401bd921 100644 --- a/hack/generator/pkg/astmodel/object_type.go +++ b/hack/generator/pkg/astmodel/object_type.go @@ -105,6 +105,7 @@ func (objectType *ObjectType) Properties() []*PropertyDefinition { return result } +// Property returns the details of a specific property based on its unique case sensitive name func (objectType *ObjectType) Property(name PropertyName) (*PropertyDefinition, bool) { prop, ok := objectType.properties[name] return prop, ok diff --git a/hack/generator/pkg/astmodel/package_import.go b/hack/generator/pkg/astmodel/package_import.go index def2f32a9..fdb247704 100644 --- a/hack/generator/pkg/astmodel/package_import.go +++ b/hack/generator/pkg/astmodel/package_import.go @@ -56,6 +56,11 @@ func (pi PackageImport) PackageName() string { return pi.PackageReference.PackageName() } +// PackagePath is the full path of the package reference +func (pi PackageImport) PackagePath() string { + return pi.PackageReference.PackagePath() +} + // Equals returns true if the passed package reference references the same package, false otherwise func (pi PackageImport) Equals(ref PackageImport) bool { packagesEqual := pi.PackageReference.Equals(ref.PackageReference) diff --git a/hack/generator/pkg/astmodel/property_definition_test.go 
b/hack/generator/pkg/astmodel/property_definition_test.go index a75a279a7..bdae22081 100644 --- a/hack/generator/pkg/astmodel/property_definition_test.go +++ b/hack/generator/pkg/astmodel/property_definition_test.go @@ -226,6 +226,54 @@ func Test_PropertyDefinitionWithType_GivenSameType_ReturnsExistingReference(t *t g.Expect(updated).To(BeIdenticalTo(original)) } +/* + * WithValidation() Tests + */ + +func Test_PropertyDefinitionWithValidation_GivenValidation_AddsToValidation(t *testing.T) { + g := NewGomegaWithT(t) + + v := ValidateRequired() + property := NewPropertyDefinition(propertyName, propertyJsonName, propertyType).WithValidation(v) + + g.Expect(property.validations).To(ContainElement(v)) +} + +func Test_PropertyDefinitionWithValidation_GivenValidation_LeavesOriginalUnmodified(t *testing.T) { + g := NewGomegaWithT(t) + + v := ValidateRequired() + original := NewPropertyDefinition(propertyName, propertyJsonName, propertyType) + property := original.WithValidation(v) + + g.Expect(property).NotTo(Equal(original)) +} + +/* + * WithoutValidation() tests + */ + +func Test_PropertyDefinitionWithoutValidation_ReturnsPropertyWithoutValidation(t *testing.T) { + g := NewGomegaWithT(t) + + v := ValidateRequired() + property := NewPropertyDefinition(propertyName, propertyJsonName, propertyType). + WithValidation(v). + WithoutValidation() + + g.Expect(property.validations).To(BeEmpty()) +} + +func Test_PropertyDefinitionWithoutValidation_LeavesOriginalUnmodified(t *testing.T) { + g := NewGomegaWithT(t) + + v := ValidateRequired() + original := NewPropertyDefinition(propertyName, propertyJsonName, propertyType).WithValidation(v) + property := original.WithoutValidation() + + g.Expect(property).NotTo(Equal(original)) +} + /* * MakeRequired() Tests */ diff --git a/hack/generator/pkg/astmodel/reference_graph.go b/hack/generator/pkg/astmodel/reference_graph.go index e34da831b..574fa222c 100644 --- a/hack/generator/pkg/astmodel/reference_graph.go +++ b/hack/generator/pkg/astmodel/reference_graph.go @@ -28,9 +28,9 @@ func CollectResourceDefinitions(definitions Types) TypeNameSet { } // CollectArmSpecDefinitions returns a TypeNameSet of all of the -// arm spec definitions passed in. +// ARM spec definitions passed in. func CollectArmSpecAndStatusDefinitions(definitions Types) TypeNameSet { - findType := func(t Type) (TypeName, error) { + findArmType := func(t Type) (TypeName, error) { name, ok := t.(TypeName) if !ok { return TypeName{}, errors.Errorf("Type was not of type TypeName, instead %T", t) @@ -39,27 +39,37 @@ func CollectArmSpecAndStatusDefinitions(definitions Types) TypeNameSet { armName := CreateArmTypeName(name) if _, ok = definitions[armName]; !ok { - return TypeName{}, errors.Errorf("Couldn't ARM type find %q", armName) + return TypeName{}, errors.Errorf("Couldn't find ARM type %q", armName) } return armName, nil } - // TODO: We should be using a better way to identify ARM types. I believe - // TODO Bevan is working on it. 
armSpecAndStatus := make(TypeNameSet) for _, def := range definitions { - if resourceType, ok := def.Type().(*ResourceType); ok { - armSpecName, err := findType(resourceType.spec) + if IsStoragePackageReference(def.Name().PackageReference) { + // We need to explicitly skip storage packages because the code below expects to find + // a related ARM type for each resource spec type, but those don't exist in our + // storage packages and the code triggers a panic() based on the error from findArmType + continue + } + + if resourceType, ok := definitions.ResolveResourceType(def.Type()); ok { + + armSpecName, err := findArmType(resourceType.spec) if err != nil { + // This should never happen because every type should have a matching ARM type + // So this panic may indicate we have a bug in the stage that generates the ARM types panic(errors.Wrapf(err, "Error getting ARM spec for resource %q", def.Name())) } armSpecAndStatus.Add(armSpecName) if resourceType.status != nil { - armStatusName, err := findType(resourceType.status) + armStatusName, err := findArmType(resourceType.status) if err != nil { + // This should never happen because every type should have a matching ARM type + // So this panic may indicate we have a bug in the stage that generates the ARM types panic(errors.Wrapf(err, "Error getting ARM status for resource %q", def.Name())) } armSpecAndStatus.Add(armStatusName) diff --git a/hack/generator/pkg/astmodel/resource.go b/hack/generator/pkg/astmodel/resource.go index 1a2698ad9..eb43b7512 100644 --- a/hack/generator/pkg/astmodel/resource.go +++ b/hack/generator/pkg/astmodel/resource.go @@ -41,6 +41,10 @@ func IsResourceType(t Type) bool { return ok } +func IsResourceDefinition(def TypeDefinition) bool { + return IsResourceType(def.Type()) +} + // NewAzureResourceType defines a new resource type for Azure. It ensures that // the resource has certain expected properties such as type and name. // The typeName parameter is just used for logging. diff --git a/hack/generator/pkg/astmodel/storage_package_reference.go b/hack/generator/pkg/astmodel/storage_package_reference.go new file mode 100644 index 000000000..c1b9b2539 --- /dev/null +++ b/hack/generator/pkg/astmodel/storage_package_reference.go @@ -0,0 +1,32 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT license. + */ + +package astmodel + +const ( + StoragePackageSuffix = "storage" +) + +type StoragePackageReference struct { + LocalPackageReference +} + +var _ PackageReference = StoragePackageReference{} + +// MakeStoragePackageReference creates a new storage package reference from a local package reference +func MakeStoragePackageReference(local LocalPackageReference) StoragePackageReference { + return StoragePackageReference{ + LocalPackageReference{ + group: local.group, + version: local.version + StoragePackageSuffix, + }, + } +} + +// IsStoragePackageReference returns true if the reference is to a storage package +func IsStoragePackageReference(reference PackageReference) bool { + _, ok := reference.(StoragePackageReference) + return ok +} diff --git a/hack/generator/pkg/astmodel/storage_package_reference_test.go b/hack/generator/pkg/astmodel/storage_package_reference_test.go new file mode 100644 index 000000000..d4fecef12 --- /dev/null +++ b/hack/generator/pkg/astmodel/storage_package_reference_test.go @@ -0,0 +1,67 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT license. + */ + +package astmodel + +import ( + "testing" + + . 
"github.com/onsi/gomega" +) + +func TestMakeStoragePackageReference(t *testing.T) { + + cases := []struct { + group string + version string + expectedVersion string + }{ + {"group", "v1", "v1storage"}, + {"microsoft.network", "v20180501", "v20180501storage"}, + } + + for _, c := range cases { + c := c + t.Run(c.group, func(t *testing.T) { + t.Parallel() + g := NewGomegaWithT(t) + + localRef := MakeLocalPackageReference(c.group, c.version) + storageRef := MakeStoragePackageReference(localRef) + + g.Expect(storageRef.PackageName()).To(Equal(c.expectedVersion)) + }) + } +} + +func TestStoragePackageReferenceEquals(t *testing.T) { + localRef := MakeLocalPackageReference("group", "v1") + storageRef := MakeStoragePackageReference(localRef) + otherRef := MakeStoragePackageReference(localRef) + + cases := []struct { + name string + storageRef StoragePackageReference + otherRef PackageReference + expectedEqual bool + }{ + {"Equal to self", storageRef, storageRef, true}, + {"Equal to other", storageRef, otherRef, true}, + {"Equal to other (reversed)", otherRef, storageRef, true}, + {"Not equal to local", storageRef, localRef, false}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + g := NewGomegaWithT(t) + + areEqual := c.storageRef.Equals(c.otherRef) + + g.Expect(areEqual).To(Equal(c.expectedEqual)) + }) + } +} diff --git a/hack/generator/pkg/astmodel/storage_type.go b/hack/generator/pkg/astmodel/storage_type.go new file mode 100644 index 000000000..558cfd84e --- /dev/null +++ b/hack/generator/pkg/astmodel/storage_type.go @@ -0,0 +1,70 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT license. + */ + +package astmodel + +import ( + "fmt" + "go/ast" +) + +// StorageType wraps an existing type to indicate that it is a storage focussed variation +type StorageType struct { + objectType ObjectType +} + +func (st *StorageType) String() string { + return fmt.Sprintf("Storage(%v)", st.objectType) +} + +// StorageType is a Type +var _ Type = &StorageType{} + +// NewStorageType wraps an object type to indicate it's a dedicated storage version +func NewStorageType(objectType ObjectType) *StorageType { + return &StorageType{ + objectType: objectType, + } +} + +// IsStorageType returns true if the passed type is a storage type; false otherwise. +func IsStorageType(t Type) bool { + _, ok := t.(*StorageType) + return ok +} + +// IsStorageDefinition returns true if the passed definition is for a storage type; false otherwise. 
+func IsStorageDefinition(definition TypeDefinition) bool { + return IsStorageType(definition.Type()) +} + +// RequiredPackageReferences returns a list of packages required by this type +func (st *StorageType) RequiredPackageReferences() []PackageReference { + return st.objectType.RequiredPackageReferences() +} + +// References returns the names of all types that this type references +func (st *StorageType) References() TypeNameSet { + return st.objectType.References() +} + +// AsType renders as a Go abstract syntax tree for a type +func (st *StorageType) AsType(codeGenerationContext *CodeGenerationContext) ast.Expr { + return st.objectType.AsType(codeGenerationContext) +} + +// AsDeclarations renders as a Go abstract syntax tree for a declaration +func (st *StorageType) AsDeclarations(codeGenerationContext *CodeGenerationContext, name TypeName, description []string) []ast.Decl { + return st.objectType.AsDeclarations(codeGenerationContext, name, description) +} + +// Equals decides if the types are the same +func (st *StorageType) Equals(t Type) bool { + if other, ok := t.(*StorageType); ok { + return TypeEquals(&st.objectType, other) + } + + return false +} diff --git a/hack/generator/pkg/astmodel/type_definition.go b/hack/generator/pkg/astmodel/type_definition.go index 14fd1d5aa..e6b9d0fe2 100644 --- a/hack/generator/pkg/astmodel/type_definition.go +++ b/hack/generator/pkg/astmodel/type_definition.go @@ -6,6 +6,7 @@ package astmodel import ( + "github.com/pkg/errors" "go/ast" "go/token" ) @@ -100,3 +101,52 @@ func (def TypeDefinition) RequiredPackageReferences() []PackageReference { func FileNameHint(name TypeName) string { return transformToSnakeCase(name.name) } + +// ApplyObjectTransformation applies a specific transformation to the ObjectType contained by this +// definition, returning a new definition +// If the definition does not contain an object, an error will be returned +func (def TypeDefinition) ApplyObjectTransformation(transform func(*ObjectType) (Type, error)) (*TypeDefinition, error) { + // We use a TypeVisitor to allow automatic handling of wrapper types (such as ArmType and StorageType) + visited := false + visitor := MakeTypeVisitor() + visitor.VisitObjectType = func(_ *TypeVisitor, ot *ObjectType, _ interface{}) (Type, error) { + rt, err := transform(ot) + if err != nil { + return nil, err + } + visited = true + return rt, nil + } + + newType, err := visitor.Visit(def.theType, nil) + if err != nil { + return nil, errors.Wrapf(err, "transformation of %v failed", def.name) + } + + if !visited { + return nil, errors.Errorf("transformation was not applied to %v (expected object type, found %v)", def.name, def.theType) + } + + result := def.WithType(newType) + return &result, nil +} + +// ApplyObjectTransformations applies multiple transformations to the ObjectType contained by this +// definition, returning a new definition. +// If the definition does not contain an object, an error will be returned +// The transformations are constrained to return ObjectType results to allow them to be chained together.
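+//
+// Illustrative usage (mirroring the call made in pipeline_create_arm_types.go; the names are taken from that file):
+//
+//	armDef, err := def.WithName(armName).
+//		ApplyObjectTransformations(removeValidations, convertPropertiesToArmTypesWrapper)
+//
+// Each transform receives the ObjectType produced by the one before it, and the first error aborts the chain.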
+func (def TypeDefinition) ApplyObjectTransformations(transforms ...func(*ObjectType) (*ObjectType, error)) (*TypeDefinition, error) { + return def.ApplyObjectTransformation(func(objectType *ObjectType) (Type, error) { + result := objectType + for i, transform := range transforms { + rt, err := transform(result) + if err != nil { + return nil, errors.Wrapf(err, "failed to apply object transformation %d", i) + } + + result = rt + } + + return result, nil + }) +} diff --git a/hack/generator/pkg/astmodel/type_definition_test.go b/hack/generator/pkg/astmodel/type_definition_test.go index 6e3c5eaca..ac6cdbbd2 100644 --- a/hack/generator/pkg/astmodel/type_definition_test.go +++ b/hack/generator/pkg/astmodel/type_definition_test.go @@ -6,6 +6,7 @@ package astmodel import ( + "github.com/pkg/errors" "testing" . "github.com/onsi/gomega" @@ -78,3 +79,139 @@ func createStringProperty(name string, description string) *PropertyDefinition { func createIntProperty(name string, description string) *PropertyDefinition { return NewPropertyDefinition(PropertyName(name), name, IntType).WithDescription(description) } + +/* + * ApplyObjectTransformation() tests + */ + +func TestApplyObjectTransformation_GivenObjectAndTransformation_AppliesTransformation(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, NewObjectType()) + property := NewStringPropertyDefinition("FullName") + + transformed, err := original.ApplyObjectTransformation(func(objectType *ObjectType) (Type, error) { + return objectType.WithProperty(property), nil + }) + + g.Expect(err).To(BeNil()) + + ot, ok := transformed.Type().(*ObjectType) + g.Expect(ok).To(BeTrue()) + g.Expect(ot).NotTo(BeNil()) + + prop, ok := ot.Property("FullName") + g.Expect(ok).To(BeTrue()) + g.Expect(prop).NotTo(BeNil()) + +} + +func TestApplyObjectTransformation_GivenObjectAndTransformationReturningError_ReturnsError(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, NewObjectType()) + + transformed, err := original.ApplyObjectTransformation(func(objectType *ObjectType) (Type, error) { + return nil, errors.New("failed") + }) + + g.Expect(transformed).To(BeNil()) + g.Expect(err).NotTo(BeNil()) +} + +func TestApplyObjectTransformation_GivenNonObjectAndTransformation_ReturnsError(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, StringType) + property := NewStringPropertyDefinition("FullName") + + transformed, err := original.ApplyObjectTransformation(func(objectType *ObjectType) (Type, error) { + return objectType.WithProperty(property), nil + }) + + g.Expect(transformed).To(BeNil()) + g.Expect(err).NotTo(BeNil()) +} + +/* + * ApplyObjectTransformations() tests + */ + +var ( + fullNameProperty = NewStringPropertyDefinition("FullName") + knownAsProperty = NewStringPropertyDefinition("KnownAs") + + injectFullName = func(objectType *ObjectType) (*ObjectType, error) { + return objectType.WithProperty(fullNameProperty), nil + } + + injectKnownAs = func(objectType *ObjectType) (*ObjectType, error) { + return objectType.WithProperty(knownAsProperty), nil + } + + failingTransform = func(objectType *ObjectType) (*ObjectType, error) { + return nil, errors.New("bang") + } +) + +func 
TestApplyObjectTransformations_GivenObjectAndTransformations_AppliesTransformations(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, NewObjectType()) + + transformed, err := original.ApplyObjectTransformations(injectFullName, injectKnownAs) + + g.Expect(err).To(BeNil()) + + ot, ok := transformed.Type().(*ObjectType) + g.Expect(ok).To(BeTrue()) + g.Expect(ot).NotTo(BeNil()) + + fn, ok := ot.Property("FullName") + g.Expect(ok).To(BeTrue()) + g.Expect(fn).NotTo(BeNil()) + + ka, ok := ot.Property("KnownAs") + g.Expect(ok).To(BeTrue()) + g.Expect(ka).NotTo(BeNil()) +} + +func TestApplyObjectTransformations_GivenObjectAndFirstTransformationReturningError_ReturnsErrorWithoutCallingSecondTransformation(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, NewObjectType()) + + transformed, err := original.ApplyObjectTransformations(failingTransform, injectKnownAs) + + g.Expect(transformed).To(BeNil()) + g.Expect(err).NotTo(BeNil()) +} + +func TestApplyObjectTransformations_GivenObjectAndSecondTransformationReturningError_ReturnsErrorFromSecondTransformation(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, NewObjectType()) + + transformed, err := original.ApplyObjectTransformations(injectFullName, failingTransform) + + g.Expect(transformed).To(BeNil()) + g.Expect(err).NotTo(BeNil()) +} + +func TestApplyObjectTransformations_GivenNonObjectAndTransformations_ReturnsError(t *testing.T) { + g := NewGomegaWithT(t) + + ref := MakeTypeName(MakeLocalPackageReference("group", "2020-01-01"), "name") + original := MakeTypeDefinition(ref, StringType) + + transformed, err := original.ApplyObjectTransformations(injectFullName, injectKnownAs) + + g.Expect(transformed).To(BeNil()) + g.Expect(err).NotTo(BeNil()) +} diff --git a/hack/generator/pkg/astmodel/type_visitor.go b/hack/generator/pkg/astmodel/type_visitor.go index 924980dca..b1ec3504a 100644 --- a/hack/generator/pkg/astmodel/type_visitor.go +++ b/hack/generator/pkg/astmodel/type_visitor.go @@ -7,7 +7,6 @@ package astmodel import ( "fmt" - "github.com/pkg/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -27,6 +26,7 @@ type TypeVisitor struct { VisitEnumType func(this *TypeVisitor, it *EnumType, ctx interface{}) (Type, error) VisitResourceType func(this *TypeVisitor, it *ResourceType, ctx interface{}) (Type, error) VisitArmType func(this *TypeVisitor, it *ArmType, ctx interface{}) (Type, error) + VisitStorageType func(this *TypeVisitor, it *StorageType, ctx interface{}) (Type, error) } // Visit invokes the appropriate VisitX on TypeVisitor @@ -58,6 +58,8 @@ func (tv *TypeVisitor) Visit(t Type, ctx interface{}) (Type, error) { return tv.VisitResourceType(tv, it, ctx) case *ArmType: return tv.VisitArmType(tv, it, ctx) + case *StorageType: + return tv.VisitStorageType(tv, it, ctx) } panic(fmt.Sprintf("unhandled type: (%T) %v", t, t)) @@ -85,6 +87,26 @@ func (tv *TypeVisitor) VisitDefinition(td TypeDefinition, ctx interface{}) (*Typ return &def, nil } +func (tv *TypeVisitor) VisitDefinitions(definitions Types, ctx interface{}) (Types, error) { + result := make(Types) + var errs []error + for _, d := range definitions { + def, err := tv.VisitDefinition(d, ctx) + if err != nil { + errs = append(errs, err) + } else { + 
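// VisitDefinition may rename the definition (for example into a storage package), so index the result by its new name +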
result[def.Name()] = *def + } + } + + if len(errs) > 0 { + err := kerrors.NewAggregate(errs) + return nil, err + } + + return result, nil +} + // MakeTypeVisitor returns a default (identity transform) visitor, which // visits every type in the tree. If you want to actually do something you will // need to override the properties on the returned TypeVisitor. @@ -104,6 +126,7 @@ func MakeTypeVisitor() TypeVisitor { VisitArmType: IdentityVisitOfArmType, VisitOneOfType: IdentityVisitOfOneOfType, VisitAllOfType: IdentityVisitOfAllOfType, + VisitStorageType: IdentityVisitOfStorageType, } } @@ -147,12 +170,12 @@ func IdentityVisitOfObjectType(this *TypeVisitor, it *ObjectType, ctx interface{ func IdentityVisitOfMapType(this *TypeVisitor, it *MapType, ctx interface{}) (Type, error) { visitedKey, err := this.Visit(it.key, ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to visit map key type %v", it.key) + return nil, errors.Wrapf(err, "failed to visit map key type %T", it.key) } visitedValue, err := this.Visit(it.value, ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to visit map value type %v", it.value) + return nil, errors.Wrapf(err, "failed to visit map value type %T", it.value) } return NewMapType(visitedKey, visitedValue), nil @@ -167,7 +190,7 @@ func IdentityVisitOfEnumType(_ *TypeVisitor, it *EnumType, _ interface{}) (Type, func IdentityVisitOfOptionalType(this *TypeVisitor, it *OptionalType, ctx interface{}) (Type, error) { visitedElement, err := this.Visit(it.element, ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to visit optional element type %v", it.element) + return nil, errors.Wrapf(err, "failed to visit optional element type %T", it.element) } return NewOptionalType(visitedElement), nil @@ -176,29 +199,33 @@ func IdentityVisitOfOptionalType(this *TypeVisitor, it *OptionalType, ctx interf func IdentityVisitOfResourceType(this *TypeVisitor, it *ResourceType, ctx interface{}) (Type, error) { visitedSpec, err := this.Visit(it.spec, ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to visit resource spec type %v", it.spec) + return nil, errors.Wrapf(err, "failed to visit resource spec type %T", it.spec) } visitedStatus, err := this.Visit(it.status, ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to visit resource status type %v", it.status) + return nil, errors.Wrapf(err, "failed to visit resource status type %T", it.status) } return it.WithSpec(visitedSpec).WithStatus(visitedStatus), nil } func IdentityVisitOfArmType(this *TypeVisitor, at *ArmType, ctx interface{}) (Type, error) { - newType, err := this.Visit(&at.objectType, ctx) + nt, err := this.Visit(&at.objectType, ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to visit ARM underlying type %v", at.objectType) + return nil, errors.Wrapf(err, "failed to visit ARM underlying type %T", at.objectType) } - ot, ok := newType.(*ObjectType) - if !ok { - return nil, errors.Errorf("expected transformation of ARM underlying type %v to return ObjectType, not %v", at.objectType, ot) - } + switch newType := nt.(type) { + case *ObjectType: + return NewArmType(*newType), nil + + case *ArmType: + return newType, nil - return MakeArmType(*ot), nil + default: + return nil, errors.Errorf("expected transformation of ARM underlying type %T to return ObjectType or ArmType, not %T", at.objectType, nt) + } } func IdentityVisitOfOneOfType(this *TypeVisitor, it OneOfType, ctx interface{}) (Type, error) { @@ -238,3 +265,21 @@ func IdentityVisitOfAllOfType(this *TypeVisitor, it 
AllOfType, ctx interface{}) return MakeAllOfType(newTypes...), nil } + +func IdentityVisitOfStorageType(this *TypeVisitor, st *StorageType, ctx interface{}) (Type, error) { + nt, err := this.Visit(&st.objectType, ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to visit storage type %T", st.objectType) + } + + switch newType := nt.(type) { + case *ObjectType: + return NewStorageType(*newType), nil + + case *StorageType: + return newType, nil + + default: + return nil, errors.Errorf("expected transformation of Storage type %T to return ObjectType or StorageType, not %T", st.objectType, newType) + } +} diff --git a/hack/generator/pkg/astmodel/types.go b/hack/generator/pkg/astmodel/types.go index b68f8e992..cb33ee205 100644 --- a/hack/generator/pkg/astmodel/types.go +++ b/hack/generator/pkg/astmodel/types.go @@ -93,3 +93,73 @@ func (types Types) Copy() Types { result.AddTypes(types) return result } + +// ResolveResourceType returns the underlying resource type if the definition contains one or names one +func (types Types) ResolveResourceType(aType Type) (*ResourceType, bool) { + switch t := aType.(type) { + + case *ResourceType: + return t, true + + case TypeName: + if def, ok := types[t]; ok { + return types.ResolveResourceType(def.theType) + } + return nil, false + + default: + return nil, false + } +} + +// IsArmType returns true if the passed type is an Arm type or names an Arm type; false otherwise. +func (types Types) IsArmType(aType Type) bool { + switch t := aType.(type) { + case *ArmType: + return true + + case *ResourceType: + return types.IsArmResource(t) + + case TypeName: + if def, ok := types[t]; ok { + return types.IsArmDefinition(&def) + } + return false + + default: + return false + } +} + +// IsArmDefinition returns true if the passed definition is for an Arm type or names an Arm type; false otherwise. +func (types Types) IsArmDefinition(definition *TypeDefinition) bool { + return types.IsArmType(definition.Type()) +} + +// IsArmResource returns true if the passed resource contains Arm types or names Arm types; false otherwise. +func (types Types) IsArmResource(resource *ResourceType) bool { + return types.IsArmType(resource.SpecType()) || types.IsArmType(resource.StatusType()) +} + +// ResolveEnumType returns the underlying enum type if the passed type is an enum type or names an enum type; false otherwise. +func (types Types) ResolveEnumType(aType Type) (EnumType, bool) { + switch t := aType.(type) { + case *EnumType: + return *t, true + + case TypeName: + if def, ok := types[t]; ok { + return types.ResolveEnumDefinition(&def) + } + return EnumType{}, false + + default: + return EnumType{}, false + } +} + +// ResolveEnumDefinition returns the underlying enum type if the passed definition is for an enum type or names an enum type; false otherwise.
+func (types Types) ResolveEnumDefinition(definition *TypeDefinition) (EnumType, bool) { + return types.ResolveEnumType(definition.Type()) +} diff --git a/hack/generator/pkg/codegen/code_generator.go b/hack/generator/pkg/codegen/code_generator.go index 2263aea90..6359726f5 100644 --- a/hack/generator/pkg/codegen/code_generator.go +++ b/hack/generator/pkg/codegen/code_generator.go @@ -83,6 +83,7 @@ func corePipelineStages(idFactory astmodel.IdentifierFactory, configuration *con createArmTypesAndCleanKubernetesTypes(idFactory), applyKubernetesResourceInterface(idFactory), + createStorageTypes(), simplifyDefinitions(), // Safety checks at the end: @@ -98,13 +99,14 @@ func (generator *CodeGenerator) Generate(ctx context.Context) error { defs := make(astmodel.Types) for i, stage := range generator.pipeline { klog.V(0).Infof("Pipeline stage %d/%d: %s", i+1, len(generator.pipeline), stage.description) - updatedDefs, err := stage.Action(ctx, defs) + // Defensive copy (in case the pipeline modifies its inputs) so that we can compare types in vs out + defsOut, err := stage.Action(ctx, defs.Copy()) if err != nil { return errors.Wrapf(err, "Failed during pipeline stage %d/%d: %s", i+1, len(generator.pipeline), stage.description) } - defsAdded := updatedDefs.Except(defs) - defsRemoved := defs.Except(updatedDefs) + defsAdded := defsOut.Except(defs) + defsRemoved := defs.Except(defsOut) if len(defsAdded) > 0 && len(defsRemoved) > 0 { klog.V(1).Infof("Added %d, removed %d type definitions", len(defsAdded), len(defsRemoved)) @@ -114,7 +116,7 @@ func (generator *CodeGenerator) Generate(ctx context.Context) error { klog.V(1).Infof("Removed %d type definitions", len(defsRemoved)) } - defs = updatedDefs + defs = defsOut } return nil diff --git a/hack/generator/pkg/codegen/codegen_test.go b/hack/generator/pkg/codegen/codegen_test.go index f6a66409e..4e57ac3e6 100644 --- a/hack/generator/pkg/codegen/codegen_test.go +++ b/hack/generator/pkg/codegen/codegen_test.go @@ -120,9 +120,9 @@ func runGoldenTest(t *testing.T, path string) { for _, stage := range codegen.pipeline { if stage.HasId("loadSchema") { pipeline = append(pipeline, loadTestSchemaIntoTypes(idFactory, cfg, testSchemaLoader)) - } else if stage.HasId("deleteGenerated") { - continue // Skip this - } else if stage.HasId("rogueCheck") { + } else if stage.HasId("deleteGenerated") || + stage.HasId("rogueCheck") || + stage.HasId("createStorage") { continue // Skip this } else if stage.HasId("exportPackages") { pipeline = append(pipeline, exportPackagesTestPipelineStage) diff --git a/hack/generator/pkg/codegen/pipeline_apply_kubernetes_resource_interface.go b/hack/generator/pkg/codegen/pipeline_apply_kubernetes_resource_interface.go index f26e27af0..a114a2036 100644 --- a/hack/generator/pkg/codegen/pipeline_apply_kubernetes_resource_interface.go +++ b/hack/generator/pkg/codegen/pipeline_apply_kubernetes_resource_interface.go @@ -21,24 +21,25 @@ func applyKubernetesResourceInterface(idFactory astmodel.IdentifierFactory) Pipe result := make(astmodel.Types) for _, typeDef := range types { + if resource, ok := typeDef.Type().(*astmodel.ResourceType); ok { specName, ok := resource.SpecType().(astmodel.TypeName) if !ok { - return nil, errors.Errorf("Resource %q spec was not of type TypeName, instead: %T", typeDef.Name(), typeDef.Type()) + return nil, errors.Errorf("resource %q spec was not of type TypeName, instead: %T", typeDef.Name(), typeDef.Type()) } spec, ok := types[specName] if !ok { - return nil, errors.Errorf("Couldn't find resource spec %q", specName) + 
return nil, errors.Errorf("couldn't find resource spec %q", specName) } - specObject, ok := spec.Type().(*astmodel.ObjectType) - if !ok { - return nil, errors.Errorf("Spec %q was not of type ObjectType, instead %T", specName, spec.Type()) + specObj, err := astmodel.TypeAsObjectType(spec.Type()) + if err != nil { + return nil, errors.Wrapf(err, "resource spec %q did not contain an object", specName) } - iface, err := astmodel.NewKubernetesResourceInterfaceImpl(idFactory, specObject) + iface, err := astmodel.NewKubernetesResourceInterfaceImpl(idFactory, specObj) if err != nil { return nil, errors.Wrapf(err, "Couldn't implement Kubernetes resource interface for %q", spec.Name()) } diff --git a/hack/generator/pkg/codegen/pipeline_create_arm_types.go b/hack/generator/pkg/codegen/pipeline_create_arm_types.go index 6ca413186..8a7a48054 100644 --- a/hack/generator/pkg/codegen/pipeline_create_arm_types.go +++ b/hack/generator/pkg/codegen/pipeline_create_arm_types.go @@ -200,37 +200,6 @@ func removeValidations(t *astmodel.ObjectType) (*astmodel.ObjectType, error) { return t, nil } -type conversionHandler = func(t *astmodel.ObjectType) (*astmodel.ObjectType, error) - -func transformTypeDefinition( - def astmodel.TypeDefinition, - handlers []conversionHandler) (astmodel.TypeDefinition, error) { - - if !astmodel.IsObjectDefinition(def) && !astmodel.IsArmDefinition(def) { - return astmodel.TypeDefinition{}, errors.Errorf("input type %q (%T) did not contain expected type", def.Name(), def.Type()) - } - - visitor := astmodel.MakeTypeVisitor() - visitor.VisitObjectType = func(this *astmodel.TypeVisitor, it *astmodel.ObjectType, ctx interface{}) (astmodel.Type, error) { - resultType := it - var err error - for _, handler := range handlers { - resultType, err = handler(resultType) - if err != nil { - return nil, err - } - } - return resultType, nil - } - - transformed, err := visitor.VisitDefinition(def, nil) - if err != nil { - return astmodel.TypeDefinition{}, errors.Wrapf(err, "failed to transform type definition %q", def.Name()) - } - - return *transformed, nil -} - func getResourceSpecDefinition( definitions astmodel.Types, resourceType *astmodel.ResourceType) (astmodel.TypeDefinition, error) { @@ -243,7 +212,7 @@ func getResourceSpecDefinition( resourceSpecDef, ok := definitions[specName] if !ok { - return astmodel.TypeDefinition{}, errors.Errorf("couldn't find spec") + return astmodel.TypeDefinition{}, errors.Errorf("couldn't find spec %v", specName) } // preserve outer spec name @@ -269,18 +238,19 @@ func createArmResourceSpecDefinition( } // ARM specs have a special interface that they need to implement, go ahead and create that here - armSpecObj, ok := armTypeDef.Type().(*astmodel.ObjectType) + spec, ok := armTypeDef.Type().(*astmodel.ArmType) if !ok { - return emptyDef, emptyName, errors.Errorf("Arm spec %q isn't of type ObjectType, instead: %T", armTypeDef.Name(), armTypeDef.Type()) + return emptyDef, emptyName, errors.Errorf("Arm spec %q isn't of type ArmType, instead: %T", armTypeDef.Name(), armTypeDef.Type()) } - iface, err := astmodel.NewArmSpecInterfaceImpl(idFactory, armSpecObj) + specObj := spec.ObjectType() + iface, err := astmodel.NewArmSpecInterfaceImpl(idFactory, &specObj) if err != nil { return emptyDef, emptyName, err } - updatedSpec := armSpecObj.WithInterface(iface) - armTypeDef = armTypeDef.WithType(updatedSpec) + updatedSpec := specObj.WithInterface(iface) + armTypeDef = armTypeDef.WithType(astmodel.NewArmType(*updatedSpec)) return armTypeDef, resourceSpecDef.Name(), nil } @@ 
-290,15 +260,23 @@ func createArmTypeDefinition(definitions astmodel.Types, def astmodel.TypeDefini return convertPropertiesToArmTypes(t, definitions) } - armDef, err := transformTypeDefinition( - // This type is the ARM type so give it the ARM name - def.WithName(astmodel.CreateArmTypeName(def.Name())), - []conversionHandler{removeValidations, convertPropertiesToArmTypesWrapper}) + armName := astmodel.CreateArmTypeName(def.Name()) + armDef, err := def.WithName(armName). + ApplyObjectTransformations(removeValidations, convertPropertiesToArmTypesWrapper) if err != nil { - return astmodel.TypeDefinition{}, err + return astmodel.TypeDefinition{}, + errors.Wrapf(err, "creating ARM prototype %v from Kubernetes definition %v", armName, def.Name()) } - return armDef, nil + result, err := armDef.ApplyObjectTransformation(func(objectType *astmodel.ObjectType) (astmodel.Type, error) { + return astmodel.NewArmType(*objectType), nil + }) + if err != nil { + return astmodel.TypeDefinition{}, + errors.Wrapf(err, "creating ARM definition %v from Kubernetes definition %v", armName, def.Name()) + } + + return *result, nil } func modifyKubeResourceSpecDefinition( @@ -311,7 +289,7 @@ func modifyKubeResourceSpecDefinition( return astmodel.TypeDefinition{}, err } - createOwnerProperty := func(t *astmodel.ObjectType) (*astmodel.ObjectType, error) { + injectOwnerProperty := func(t *astmodel.ObjectType) (*astmodel.ObjectType, error) { if resourceType.Owner() != nil { ownerField, err := createOwnerProperty(idFactory, resourceType.Owner()) if err != nil { @@ -323,13 +301,8 @@ func modifyKubeResourceSpecDefinition( return t, nil } - kubePropertyRemapper := func(t *astmodel.ObjectType) (*astmodel.ObjectType, error) { - var hasName bool - for _, prop := range t.Properties() { - if prop.PropertyName() == "Name" { - hasName = true - } - } + remapProperties := func(t *astmodel.ObjectType) (*astmodel.ObjectType, error) { + _, hasName := t.Property("Name") // TODO: Right now the Kubernetes type has all of its standard requiredness (validations). 
If we want to allow // TODO: users to submit "just a name and owner" types we will have to strip some validation until @@ -342,14 +315,12 @@ func modifyKubeResourceSpecDefinition( return kubernetesType, nil } - kubernetesDef, err := transformTypeDefinition( - resourceSpecDef, - []conversionHandler{kubePropertyRemapper, createOwnerProperty}) + kubernetesDef, err := resourceSpecDef.ApplyObjectTransformations(remapProperties, injectOwnerProperty) if err != nil { - return astmodel.TypeDefinition{}, err + return astmodel.TypeDefinition{}, errors.Wrapf(err, "remapping properties of Kubernetes definition") } - return kubernetesDef, nil + return *kubernetesDef, nil } func addArmConversionInterface( @@ -364,17 +335,23 @@ func addArmConversionInterface( return emptyDef, errors.Errorf("ARM definition %q did not define an object type", armDef.Name()) } - addInterfaceHandler := func(t *astmodel.ObjectType) (*astmodel.ObjectType, error) { - return t.WithInterface(armconversion.NewArmTransformerImpl( + addInterfaceHandler := func(t *astmodel.ObjectType) (astmodel.Type, error) { + result := t.WithInterface(armconversion.NewArmTransformerImpl( armDef.Name(), objectType, idFactory, - isResource)), nil + isResource)) + return result, nil + } + + result, err := kubeDef.ApplyObjectTransformation(addInterfaceHandler) + if err != nil { + emptyDef := astmodel.TypeDefinition{} + return emptyDef, + errors.Wrapf(err, "failed to add ARM conversion interface to Kubernetes object definition %v", armDef.Name()) } - return transformTypeDefinition( - kubeDef, - []conversionHandler{addInterfaceHandler}) + return *result, nil } func convertArmPropertyTypeIfNeeded(definitions astmodel.Types, t astmodel.Type) (astmodel.Type, error) { diff --git a/hack/generator/pkg/codegen/pipeline_create_storage_types.go b/hack/generator/pkg/codegen/pipeline_create_storage_types.go new file mode 100644 index 000000000..eec4439a9 --- /dev/null +++ b/hack/generator/pkg/codegen/pipeline_create_storage_types.go @@ -0,0 +1,233 @@ +/* + * Copyright (c) Microsoft Corporation. + * Licensed under the MIT license. + */ + +package codegen + +import ( + "context" + "fmt" + "github.com/Azure/k8s-infra/hack/generator/pkg/astmodel" + kerrors "k8s.io/apimachinery/pkg/util/errors" +) + +// createStorageTypes returns a pipeline stage that creates dedicated storage types for each resource and nested object. +// Storage versions are created for *all* API versions to allow users of older versions of the operator to easily +// upgrade. This is of course a bit odd for the first release, but defining the approach from day one is useful.
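+//
+// As a sketch of the effect: a definition living in package ".../v20180501" gains a storage counterpart
+// in ".../v20180501storage" whose properties are made optional and have their validations and descriptions
+// stripped (apart from AzureName and Owner, which are preserved as-is), while ARM and enum definitions are
+// skipped entirely. The package mapping comes from MakeStoragePackageReference (the group/version below
+// follow the cases in storage_package_reference_test.go):
+//
+//	storageRef := astmodel.MakeStoragePackageReference(astmodel.MakeLocalPackageReference("microsoft.network", "v20180501"))
+//	// storageRef.PackageName() == "v20180501storage"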
+func createStorageTypes() PipelineStage { + return PipelineStage{ + id: "createStorage", + description: "Create storage versions of CRD types", + Action: func(ctx context.Context, types astmodel.Types) (astmodel.Types, error) { + + storageTypes := make(astmodel.Types) + visitor := makeStorageTypesVisitor(types) + vc := makeStorageTypesVisitorContext() + var errs []error + for _, d := range types { + d := d + + if types.IsArmDefinition(&d) { + // Skip ARM definitions, we don't need to create storage variants of those + continue + } + + if _, ok := types.ResolveEnumDefinition(&d); ok { + // Skip Enum definitions as we use the base type for storage + continue + } + + def, err := visitor.VisitDefinition(d, vc) + if err != nil { + errs = append(errs, err) + continue + } + + finalDef := def.WithDescription(descriptionForStorageVariant(d)) + storageTypes[finalDef.Name()] = finalDef + } + + if len(errs) > 0 { + err := kerrors.NewAggregate(errs) + return nil, err + } + + types.AddTypes(storageTypes) + + return types, nil + }, + } +} + +// makeStorageTypesVisitor returns a TypeVisitor to do the creation of dedicated storage types +func makeStorageTypesVisitor(types astmodel.Types) astmodel.TypeVisitor { + factory := &StorageTypeFactory{ + types: types, + } + + result := astmodel.MakeTypeVisitor() + result.VisitTypeName = factory.visitTypeName + result.VisitObjectType = factory.visitObjectType + result.VisitArmType = factory.visitArmType + + factory.visitor = result + factory.propertyConversions = []propertyConversion{ + factory.preserveKubernetesResourceStorageProperties, + factory.convertPropertiesForStorage, + } + + return result +} + +type StorageTypeFactory struct { + types astmodel.Types + propertyConversions []propertyConversion + visitor astmodel.TypeVisitor +} + +// A property conversion accepts a property definition and optionally applies a conversion to make +// the property suitable for use on a storage type. Conversions return nil if they decline to +// convert, deferring the conversion to another. 
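+// For example, preserveKubernetesResourceStorageProperties below returns (nil, nil) for every property
+// other than AzureName and Owner, deferring those properties to convertPropertiesForStorage, the
+// catch-all conversion registered after it in makeStorageTypesVisitor.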
+type propertyConversion = func(property *astmodel.PropertyDefinition, ctx StorageTypesVisitorContext) (*astmodel.PropertyDefinition, error) + +func (factory *StorageTypeFactory) visitTypeName(_ *astmodel.TypeVisitor, name astmodel.TypeName, ctx interface{}) (astmodel.Type, error) { + visitorContext := ctx.(StorageTypesVisitorContext) + + // Resolve the type name to the actual referenced type + actualDefinition, actualDefinitionFound := factory.types[name] + + // Check for property specific handling + if visitorContext.property != nil && actualDefinitionFound { + if et, ok := actualDefinition.Type().(*astmodel.EnumType); ok { + // Property type refers to an enum, so we use the base type instead + return et.BaseType(), nil + } + } + + // Map the type name into our storage namespace + localRef, ok := name.PackageReference.AsLocalPackage() + if !ok { + return name, nil + } + + storageRef := astmodel.MakeStoragePackageReference(localRef) + visitedName := astmodel.MakeTypeName(storageRef, name.Name()) + return visitedName, nil +} + +func (factory *StorageTypeFactory) visitObjectType( + _ *astmodel.TypeVisitor, + object *astmodel.ObjectType, + ctx interface{}) (astmodel.Type, error) { + visitorContext := ctx.(StorageTypesVisitorContext) + objectContext := visitorContext.forObject(object) + + var errs []error + properties := object.Properties() + for i, prop := range properties { + p, err := factory.makeStorageProperty(prop, objectContext) + if err != nil { + errs = append(errs, err) + } else { + properties[i] = p + } + } + + if len(errs) > 0 { + err := kerrors.NewAggregate(errs) + return nil, err + } + + objectType := astmodel.NewObjectType().WithProperties(properties...) + return astmodel.NewStorageType(*objectType), nil +} + +// makeStorageProperty applies a conversion to make a variant of the property for use when +// serializing to storage +func (factory *StorageTypeFactory) makeStorageProperty( + prop *astmodel.PropertyDefinition, + objectContext StorageTypesVisitorContext) (*astmodel.PropertyDefinition, error) { + for _, conv := range factory.propertyConversions { + p, err := conv(prop, objectContext.forProperty(prop)) + if err != nil { + // Something went wrong, return the error + return nil, err + } + if p != nil { + // We have the conversion we need, return it promptly + return p, nil + } + } + + return nil, fmt.Errorf("failed to find a conversion for property %v", prop.PropertyName()) +} + +// preserveKubernetesResourceStorageProperties preserves properties required by the KubernetesResource interface as they're always required +func (factory *StorageTypeFactory) preserveKubernetesResourceStorageProperties( + prop *astmodel.PropertyDefinition, + _ StorageTypesVisitorContext) (*astmodel.PropertyDefinition, error) { + if prop.HasName(astmodel.AzureNameProperty) || + prop.HasName(astmodel.OwnerProperty) { + // Keep these unchanged + return prop, nil + } + + // No opinion, defer to another conversion + return nil, nil +} + +func (factory *StorageTypeFactory) convertPropertiesForStorage( + prop *astmodel.PropertyDefinition, + objectContext StorageTypesVisitorContext) (*astmodel.PropertyDefinition, error) { + propertyType, err := factory.visitor.Visit(prop.PropertyType(), objectContext) + if err != nil { + return nil, err + } + + p := prop.WithType(propertyType). + MakeOptional(). + WithoutValidation(). 
+ WithDescription("") + + return p, nil +} + +func (factory *StorageTypeFactory) visitArmType( + _ *astmodel.TypeVisitor, + armType *astmodel.ArmType, + _ interface{}) (astmodel.Type, error) { + // We don't want to do anything with ARM types + return armType, nil +} + +func descriptionForStorageVariant(definition astmodel.TypeDefinition) []string { + pkg := definition.Name().PackageReference.PackageName() + + result := []string{ + fmt.Sprintf("Storage version of %v.%v", pkg, definition.Name().Name()), + } + result = append(result, definition.Description()...) + + return result +} + +type StorageTypesVisitorContext struct { + object *astmodel.ObjectType + property *astmodel.PropertyDefinition +} + +func makeStorageTypesVisitorContext() StorageTypesVisitorContext { + return StorageTypesVisitorContext{} +} + +func (context StorageTypesVisitorContext) forObject(object *astmodel.ObjectType) StorageTypesVisitorContext { + context.object = object + context.property = nil + return context +} + +func (context StorageTypesVisitorContext) forProperty(property *astmodel.PropertyDefinition) StorageTypesVisitorContext { + context.property = property + return context +} diff --git a/scripts/deploy_testing_secret.sh b/scripts/deploy_testing_secret.sh new file mode 100755 index 000000000..6b5cfbc5d --- /dev/null +++ b/scripts/deploy_testing_secret.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +set -o errexit +set -o nounset +set -o pipefail + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. + +bins="${REPO_ROOT}/hack/tools/bin" +k="$bins/kubectl" + +cat <