From 7c02ec5607e5f2465c8ce4843289fba7399753d5 Mon Sep 17 00:00:00 2001 From: Janani Vasudevan <49576785+jananivMS@users.noreply.github.com> Date: Tue, 3 Sep 2019 14:57:53 -0600 Subject: [PATCH] Pull and merge changes from master into Azure-sql (#182) * feat: implement keyvault controller * Ace's KV changes with updates * Added an event for the final successful provisioning * Updated changes based on the PR comments * removing unwanted file * making resource group name the one in the keyvault yaml * need to handle unexpected error types...like validation.error (#111) * refactor tests (#90) * improve tests with parallel execution and rm sleep * fix the tests to run on kindcluster * Updates to KV controller from Ace (#80) (#112) Co-authored-by: Ace Eldeib * Test update (#115) * this needs to exist in the reconciler in order to use controllerutil createorupdate * Feat/add consumer group kind (#117) * add consumer group kind * update tests for consumer group * fix isbeingdeleted * Updates to README - steps for onboarding (#114) * cluster additions * updated docs * Update azure-pipelines.yaml (#119) * Update azure-pipelines.yaml * fix tests (#140) * revert back // +kubebuilder:subresource:status changes - fix broken tests * Devcontainer to Help Onboard New People (#142) * add dev container - wip * DevContainer up and running. * Removed `sleep 80` and replaced with `kubectl wait`. * Run `make set-kindcluster` from docker-compose. * Set timeout on wait. * Added `install-test-dependency` to makefile and dockerfile. * Update README - Create SP with contribution rights. * Updated README with details on using devcontainer. * Stuff that wanted me to commit. * Reverted changes made to `docker-build` in Makefile. * pass future where possible instead of bool (#121) * first commit on Amanda's branch * first * before properties * test not tested * test works * unit tests work, needs firewall rules * addresses feedback * erin's feedback * janani's change, pass future * async works much better now * janani feedback * screwed up interface prototype * randomize the resource names used in tests (#152) * Ability to Set SecretName When Creating Event Hub (#151) * Updated eventhub_types - Added `secretName`. * Added `secretName` to sample manifest. * Set secret name to `secretName` if set, otherwise use eventhub name. * Updated Makefile to update Azure Operator. Also added the ability to rebuild image without cache. * Updated README on how to update the Azure Operator. * Updated CRD with SecretName description. * Added tests to ensure `SecretName` was being used if present. * Fix test. * Pr 22 merge (#158) * kubebuilder init --domain azure --license none * kubebuilder create api --group service --version v1alpha1 --kind Storage * kubebuilder create api --group service --version v1alpha1 --kind CosmosDB * Add MIT License * Initial code to support Azure Storage Account * Add development docs * Remove the storage account name from the spec * Sync additional resources for Azure storage account 1. Create a secret based on storage account credentials 2.
Add the global config * Upgrade kubebuilder to 2.0.0-beta.0 and controller-runtime to v0.2.0-beta.4 * Copy pkg in Dockerfile * Update controller-gen and make manifests * Add prefix "Storage" for storage_types * feature: add redis cache service * Ignore the NotFound error when deleting resources * Requeue the request if the deployment is not complete * feature: add cosmosdb service * Refine the logic of updating additional resources and output * Deploy operator on a remote cluster * add a sample app deployment yaml * Generate assets for the templates * Requeue after 30 seconds to avoid too many requests Ignore the NotFound error when deleting cosmosdb * Fix a bug of missing capacity of rediscache template * fix: judge whether resources need to be updated With adding generation in status, we can judge whether resources need to be updated. Co-authored-by: Bin Xia * Add docs to run the demo * Update manager-role to operate secrets Workaround: the rule should be appended. But I don't know how for now. The workaround is to copy config/rbac/role.yaml and add the new rule. Should be fixed in future. * fix(Makefile): rename the target from "generate" to "generate-template" to avoid conflict * Refactoring data focused operators. Storage currently working though it needs cleanup * Added deepcopy generated code * CosmosDB deploy working * Detailing current implementation of CosmosDB Create parameters * Removing TestTags * Redis cache now deploys * Cleaned up code and removed references to v1alpha1 * Updating controllers logging calls Co-authored-by: Chris Risner Co-authored-by: Bin Xia * Pr 22 merge (#158) (#165) * Capture EventHub to Azure Blob Storage Container (#146) * added eventhub with and without capture * create, delete and get properties for storage manager * capture eventhub tests * added storage tests to make tests * configured location to default set by environment variable * synchronised test setup and teardown * incorporated storages module * fixed setup and teardown of storage tests * fixed storage container tests * Camelcase EventHub (#176) * Removed ports from docker-compose. * Updated CRD - camelcase over lowercase. * Updated example manifests. * Role thing. * Camelcase new changes to EventHub types. * Camelcase example. * Removed old file. * Fixing issues #173 and #174 (#175) * Updated controllers to use `azure.microsoft.com` over `service.azure`. * Updated webhooks to point to `azure.microsoft.com`. * Updated cainjection to point to `azure.microsoft.com`. * Regenerated role.yaml. * Point kustomization.yaml in CRD to right base CRDs. * Updated demo. * Role update. * Update group from service to azure in PROJECT.
* Increased Partition Count Minimum in EventHub to 2 (#178) * Increase minimum partition count to 2. * Updated the CRD. * Updated eventhub example. * Changed resource group example. * Increased test partition count to 2. * Updated tests. --- .envrc | 1 + .gitignore | 3 + Makefile | 24 +- PROJECT | 9 + api/v1/cosmosdb_types.go | 136 +++++ api/v1/eventhub_types.go | 52 +- api/v1/eventhub_types_test.go | 70 ++- api/v1/eventhubnamespace_types.go | 8 +- api/v1/eventhubnamespace_types_test.go | 2 +- api/v1/rediscache_types.go | 131 +++++ api/v1/resourcegroup_types_test.go | 2 +- api/v1/storage_types.go | 136 +++++ api/v1/suite_test.go | 6 + api/v1/zz_generated.deepcopy.go | 495 ++++++++++++++++++ .../bases/azure.microsoft.com_cosmosdbs.yaml | 465 ++++++++++++++++ ...zure.microsoft.com_eventhubnamespaces.yaml | 8 +- .../bases/azure.microsoft.com_eventhubs.yaml | 69 ++- .../azure.microsoft.com_rediscaches.yaml | 470 +++++++++++++++++ .../bases/azure.microsoft.com_storages.yaml | 493 +++++++++++++++++ config/crd/kustomization.yaml | 3 + .../crd/patches/cainjection_in_cosmosdbs.yaml | 8 + .../patches/cainjection_in_rediscaches.yaml | 8 + .../crd/patches/cainjection_in_storages.yaml | 8 + config/crd/patches/webhook_in_cosmosdbs.yaml | 17 + .../crd/patches/webhook_in_rediscaches.yaml | 17 + config/crd/patches/webhook_in_storages.yaml | 17 + config/default/manager_image_patch.yaml-e | 36 -- config/default/manager_role_patch.yaml | 80 +++ config/rbac/role.yaml | 121 +++-- config/samples/.gitkeep | 0 config/samples/azure_v1_cosmosdb.yaml | 10 + config/samples/azure_v1_eventhub.yaml | 8 +- config/samples/azure_v1_eventhub_capture.yaml | 29 + .../samples/azure_v1_eventhubnamespace.yaml | 8 +- config/samples/azure_v1_rediscache.yaml | 13 + config/samples/azure_v1_storage.yaml | 12 + controllers/consumergroup_controller_test.go | 1 - controllers/cosmosdb_controller.go | 259 +++++++++ controllers/eventhub_controller.go | 37 +- controllers/eventhub_controller_test.go | 2 +- .../eventhubnamespace_controller_test.go | 2 - controllers/rediscache_controller.go | 260 +++++++++ controllers/resourcegroup_controller_test.go | 2 +- controllers/sqlserver_controller.go | 2 +- controllers/storage_controller.go | 269 ++++++++++ controllers/suite_test.go | 50 +- docs/development.md | 122 +++++ examples/demo/azure-vote-app-redis.yaml | 65 +++ hack/boilerplate.go.txt | 2 +- main.go | 44 +- pkg/client/deployment/deployment.go | 41 ++ pkg/client/group/group.go | 39 ++ pkg/config/config.go | 56 ++ pkg/helpers/deployment.go | 36 ++ pkg/helpers/helpers.go | 43 ++ pkg/helpers/secret.go | 49 ++ pkg/helpers/service.go | 57 ++ pkg/helpers/types.go | 6 + pkg/iam/authorizers.go | 58 ++ pkg/resourcemanager/cosmosdbs/cosmosdbs.go | 104 ++++ .../eventhubs/consumergroup_test.go | 2 +- pkg/resourcemanager/eventhubs/hub.go | 18 +- pkg/resourcemanager/eventhubs/hub_test.go | 4 +- pkg/resourcemanager/eventhubs/suite_test.go | 4 +- .../rediscaches/rediscaches.go | 85 +++ .../resourcegroups/resourcegroup_test.go | 5 +- .../resourcegroups/suite_test.go | 2 +- .../sqlclient/endtoend_test.go | 1 + .../sqlclient/sqlproperties.go | 3 + .../storages/blob_containers.go | 84 +++ .../storages/blob_containers_test.go | 81 +++ pkg/resourcemanager/storages/storages.go | 91 ++++ pkg/resourcemanager/storages/storages_test.go | 79 +++ pkg/resourcemanager/storages/suite_test.go | 78 +++ pkg/template/assets/cosmosdb.json | 47 ++ pkg/template/assets/rediscache.json | 55 ++ pkg/template/assets/storage.json | 63 +++ pkg/template/templates.go | 281 ++++++++++ 78 
files changed, 5399 insertions(+), 165 deletions(-) create mode 100644 .envrc create mode 100644 api/v1/cosmosdb_types.go create mode 100644 api/v1/rediscache_types.go create mode 100644 api/v1/storage_types.go create mode 100644 config/crd/bases/azure.microsoft.com_cosmosdbs.yaml create mode 100644 config/crd/bases/azure.microsoft.com_rediscaches.yaml create mode 100644 config/crd/bases/azure.microsoft.com_storages.yaml create mode 100644 config/crd/patches/cainjection_in_cosmosdbs.yaml create mode 100644 config/crd/patches/cainjection_in_rediscaches.yaml create mode 100644 config/crd/patches/cainjection_in_storages.yaml create mode 100644 config/crd/patches/webhook_in_cosmosdbs.yaml create mode 100644 config/crd/patches/webhook_in_rediscaches.yaml create mode 100644 config/crd/patches/webhook_in_storages.yaml delete mode 100644 config/default/manager_image_patch.yaml-e create mode 100644 config/default/manager_role_patch.yaml create mode 100644 config/samples/.gitkeep create mode 100644 config/samples/azure_v1_cosmosdb.yaml create mode 100644 config/samples/azure_v1_eventhub_capture.yaml create mode 100644 config/samples/azure_v1_rediscache.yaml create mode 100644 config/samples/azure_v1_storage.yaml create mode 100644 controllers/cosmosdb_controller.go create mode 100644 controllers/rediscache_controller.go create mode 100644 controllers/storage_controller.go create mode 100644 docs/development.md create mode 100644 examples/demo/azure-vote-app-redis.yaml create mode 100644 pkg/client/deployment/deployment.go create mode 100644 pkg/client/group/group.go create mode 100644 pkg/config/config.go create mode 100644 pkg/helpers/deployment.go create mode 100644 pkg/helpers/helpers.go create mode 100644 pkg/helpers/secret.go create mode 100644 pkg/helpers/service.go create mode 100644 pkg/helpers/types.go create mode 100644 pkg/iam/authorizers.go create mode 100644 pkg/resourcemanager/cosmosdbs/cosmosdbs.go create mode 100644 pkg/resourcemanager/rediscaches/rediscaches.go create mode 100644 pkg/resourcemanager/storages/blob_containers.go create mode 100644 pkg/resourcemanager/storages/blob_containers_test.go create mode 100644 pkg/resourcemanager/storages/storages.go create mode 100644 pkg/resourcemanager/storages/storages_test.go create mode 100644 pkg/resourcemanager/storages/suite_test.go create mode 100644 pkg/template/assets/cosmosdb.json create mode 100644 pkg/template/assets/rediscache.json create mode 100644 pkg/template/assets/storage.json create mode 100644 pkg/template/templates.go diff --git a/.envrc b/.envrc new file mode 100644 index 00000000000..37b10962dbc --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +export GO111MODULE=on diff --git a/.gitignore b/.gitignore index 12c5e121e94..53a14efc09c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,9 @@ default.etcd # Output of the go coverage tool, specifically when used with LiteIDE *.out + +bin/ +vendor/ coverage.txt report.xml cover.html diff --git a/Makefile b/Makefile index cc7088e4478..6ffedc4c3ef 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,13 @@ # Image URL to use all building/pushing image targets + + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + IMG ?= controller:latest # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:trivialVersions=true" @@ -7,12 +16,12 @@ all: manager # Run tests test: generate fmt vet manifests - 
TEST_USE_EXISTING_CLUSTER=false go test -v -coverprofile=coverage.txt -covermode count ./api/... ./controllers/... ./pkg/resourcemanager/eventhubs/... ./pkg/resourcemanager/resourcegroups/... 2>&1 | tee testlogs.txt + TEST_USE_EXISTING_CLUSTER=false go test -v -coverprofile=coverage.txt -covermode count ./api/... ./controllers/... ./pkg/resourcemanager/eventhubs/... ./pkg/resourcemanager/resourcegroups/... ./pkg/resourcemanager/storages/... 2>&1 | tee testlogs.txt go-junit-report < testlogs.txt > report.xml go tool cover -html=coverage.txt -o cover.html # Run tests with existing cluster test-existing: generate fmt vet manifests - TEST_USE_EXISTING_CLUSTER=true go test -test.parallel 3 -v -coverprofile=coverage-existing.txt -covermode count ./api/... ./controllers/... ./pkg/resourcemanager/eventhubs/... ./pkg/resourcemanager/resourcegroups/... 2>&1 | tee testlogs-existing.txt + TEST_USE_EXISTING_CLUSTER=true go test -v -coverprofile=coverage-existing.txt -covermode count ./api/... ./controllers/... ./pkg/resourcemanager/eventhubs/... ./pkg/resourcemanager/resourcegroups/... ./pkg/resourcemanager/storages/... 2>&1 | tee testlogs-existing.txt go-junit-report < testlogs-existing.txt > report-existing.xml go tool cover -html=coverage-existing.txt -o cover-existing.html @@ -70,6 +79,9 @@ docker-build: docker-push: docker push ${IMG} +# Build and Push the docker image +build-and-push: docker-build docker-push + # find or download controller-gen # download controller-gen if necessary controller-gen: @@ -80,6 +92,14 @@ else CONTROLLER_GEN=$(shell which controller-gen) endif +.PHONY: install-bindata +install-bindata: + go get -u github.com/jteeuwen/go-bindata/... + +.PHONY: generate-template +generate-template: + go-bindata -pkg template -prefix pkg/template/assets/ -o pkg/template/templates.go pkg/template/assets/ + create-kindcluster: ifeq (,$(shell kind get clusters)) @echo "no kind cluster" diff --git a/PROJECT b/PROJECT index 96cf4e0276e..56e319e160e 100644 --- a/PROJECT +++ b/PROJECT @@ -2,6 +2,15 @@ version: "2" domain: microsoft.com repo: github.com/Azure/azure-service-operator resources: +- group: azure + version: v1 + kind: Storage +- group: azure + version: v1 + kind: CosmosDB +- group: azure + version: v1 + kind: RedisCache - group: azure version: v1 kind: Eventhub diff --git a/api/v1/cosmosdb_types.go b/api/v1/cosmosdb_types.go new file mode 100644 index 00000000000..292e54e3529 --- /dev/null +++ b/api/v1/cosmosdb_types.go @@ -0,0 +1,136 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// CosmosDBSpec defines the desired state of CosmosDB +type CosmosDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + Kind CosmosDBKind `json:"kind,omitempty"` + Properties CosmosDBProperties `json:"properties,omitempty"` +} + +// CosmosDBKind enumerates the values for kind. +// Only one of the following kinds may be specified. +// If none of the following kinds is specified, the default one +// is GlobalDocumentDB. +// +kubebuilder:validation:Enum=GlobalDocumentDB;MongoDB +type CosmosDBKind string + +const ( + CosmosDBKindGlobalDocumentDB CosmosDBKind = "GlobalDocumentDB" + CosmosDBKindMongoDB CosmosDBKind = "MongoDB" +) + +// CosmosDBProperties defines the properties of CosmosDB. +type CosmosDBProperties struct { + // CosmosDBDatabaseAccountOfferType - The offer type for the Cosmos DB database account. + DatabaseAccountOfferType CosmosDBDatabaseAccountOfferType `json:"databaseAccountOfferType,omitempty"` + //Locations []CosmosDBLocation `json:"locations,omitempty"` +} + +// +kubebuilder:validation:Enum=Standard +type CosmosDBDatabaseAccountOfferType string + +const ( + CosmosDBDatabaseAccountOfferTypeStandard CosmosDBDatabaseAccountOfferType = "Standard" +) + +/* +type CosmosDBLocation struct { + FailoverPriority int `json:"failoverPriority,omitempty"` + LocationName string `json:"locationName,omitempty"` + IsZoneRedundant bool `json:"isZoneRedundant,omitempty"` +} +*/ + +// CosmosDBStatus defines the observed state of CosmosDB +type CosmosDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` +} + +type CosmosDBOutput struct { + CosmosDBName string `json:"cosmosDBName,omitempty"` + PrimaryMasterKey string `json:"primaryMasterKey,omitempty"` + //SecondaryMasterKey string `json:"secondaryMasterKey,omitempty"` + //PrimaryReadonlyMasterKey string `json:"primaryReadonlyMasterKey,omitempty"` + //SecondaryReadonlyMasterKey string `json:"secondaryReadonlyMasterKey,omitempty"` +} + +// CosmosDBAdditionalResources holds the additional resources +type CosmosDBAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CosmosDB is the Schema for the cosmosdbs API +type CosmosDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CosmosDBSpec `json:"spec,omitempty"` + Status CosmosDBStatus `json:"status,omitempty"` + Output
CosmosDBOutput `json:"output,omitempty"` + AdditionalResources CosmosDBAdditionalResources `json:"additionalResources,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CosmosDBList contains a list of CosmosDB +type CosmosDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CosmosDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CosmosDB{}, &CosmosDBList{}) +} + +func (cosmosDB *CosmosDB) IsSubmitted() bool { + return cosmosDB.Status.Provisioning || cosmosDB.Status.Provisioned +} diff --git a/api/v1/eventhub_types.go b/api/v1/eventhub_types.go index 89d0f5f6dda..45cc87fb822 100644 --- a/api/v1/eventhub_types.go +++ b/api/v1/eventhub_types.go @@ -29,7 +29,7 @@ type EventhubNamespaceResource struct { Location string `json:"location"` Sku EventhubNamespaceSku `json:"sku,omitempty"` Properties EventhubNamespaceProperties `json:"properties,omitempty"` - ResourceGroupName string `json:"resourcegroup,omitempty"` + ResourceGroupName string `json:"resourceGroup,omitempty"` } // EventhubSpec defines the desired state of Eventhub @@ -61,16 +61,58 @@ type EventhubAuthorizationRule struct { Rights []string `json:"rights,omitempty"` } +type StorageAccount struct { + // ResourceGroup - Name of the storage account resource group + // +kubebuilder:validation:Pattern=^[-\w\._\(\)]+$ + ResourceGroup string `json:"resourceGroup,omitempty"` + // AccountName - Name of the storage account + // +kubebuilder:validation:MaxLength=24 + // +kubebuilder:validation:MinLength=3 + // +kubebuilder:validation:Pattern=^[a-z0-9]+$ + AccountName string `json:"accountName,omitempty"` +} + +//Destination for capture (blob storage etc) +type Destination struct { + // ArchiveNameFormat - Blob naming convention for archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. 
etc) are mandatory irrespective of order + ArchiveNameFormat string `json:"archiveNameFormat,omitempty"` + // BlobContainer - Blob container Name + BlobContainer string `json:"blobContainer,omitempty"` + // Name - Name for capture destination + // +kubebuilder:validation:Enum=EventHubArchive.AzureBlockBlob;EventHubArchive.AzureDataLake + Name string `json:"name,omitempty"` + // StorageAccount - Details of the storage account + StorageAccount StorageAccount `json:"storageAccount,omitempty"` +} + +//CaptureDescription defines the properties required for eventhub capture +type CaptureDescription struct { + // Destination - Resource id of the storage account to be used to create the blobs + Destination Destination `json:"destination,omitempty"` + // Enabled - indicates whether capture is enabled + Enabled bool `json:"enabled"` + // SizeLimitInBytes - The size window defines the amount of data built up in your Event Hub before a capture operation + // +kubebuilder:validation:Maximum=524288000 + // +kubebuilder:validation:Minimum=10485760 + SizeLimitInBytes int32 `json:"sizeLimitInBytes,omitempty"` + // IntervalInSeconds - The time window allows you to set the frequency with which the capture to Azure Blobs will happen + // +kubebuilder:validation:Maximum=900 + // +kubebuilder:validation:Minimum=60 + IntervalInSeconds int32 `json:"intervalInSeconds,omitempty"` +} + //EventhubProperties defines the namespace properties type EventhubProperties struct { // +kubebuilder:validation:Maximum=7 // +kubebuilder:validation:Minimum=1 // MessageRetentionInDays - Number of days to retain the events for this Event Hub, value should be 1 to 7 days - MessageRetentionInDays int32 `json:"messageretentionindays,omitempty"` + MessageRetentionInDays int32 `json:"messageRetentionInDays,omitempty"` // +kubebuilder:validation:Maximum=32 - // +kubebuilder:validation:Minimum=1 - // PartitionCount - Number of partitions created for the Event Hub, allowed values are from 1 to 32 partitions. - PartitionCount int32 `json:"partitioncount,omitempty"` + // +kubebuilder:validation:Minimum=2 + // PartitionCount - Number of partitions created for the Event Hub, allowed values are from 2 to 32 partitions. + PartitionCount int32 `json:"partitionCount,omitempty"` + // CaptureDescription - Details specifying EventHub capture to persistent storage + CaptureDescription CaptureDescription `json:"captureDescription,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1/eventhub_types_test.go b/api/v1/eventhub_types_test.go index c448093dc21..09db200b33b 100644 --- a/api/v1/eventhub_types_test.go +++ b/api/v1/eventhub_types_test.go @@ -41,32 +41,74 @@ var _ = Describe("Eventhub", func() { // Add any teardown steps that needs to be executed after each test }) + createEventHub := func(captureDescription CaptureDescription) *Eventhub { + created := &Eventhub{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: EventhubSpec{ + Location: resourcegroupLocation, + Namespace: "foo-eventhub-ns-name", + ResourceGroup: "foo-resource-group", + Properties: EventhubProperties{ + MessageRetentionInDays: 7, + PartitionCount: 2, + CaptureDescription: captureDescription, + }, + }, + } + return created + } + // Add Tests for OpenAPI validation (or additional CRD features) specified in // your API definition. // Avoid adding tests for vanilla CRUD operations because they would // test Kubernetes API server, which isn't the goal here.
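[Editor's note: for reference, the capture fields added above surface in a Kubernetes manifest through their camelCase json tags. The sketch below is illustrative only; every name and value is hypothetical, assembled from the types in this patch, and it is not the content of the config/samples/azure_v1_eventhub_capture.yaml sample the PR adds.]

    apiVersion: azure.microsoft.com/v1
    kind: Eventhub
    metadata:
      name: eventhub-capture-sample           # hypothetical
    spec:
      location: westus                        # hypothetical
      namespace: sample-eventhub-ns           # hypothetical EventHub namespace
      resourceGroup: sample-resource-group    # hypothetical
      properties:
        messageRetentionInDays: 7             # CRD allows 1 to 7
        partitionCount: 2                     # CRD now allows 2 to 32
        captureDescription:
          enabled: true
          sizeLimitInBytes: 524288000         # CRD allows 10485760 to 524288000
          intervalInSeconds: 90               # CRD allows 60 to 900
          destination:
            name: EventHubArchive.AzureBlockBlob
            blobContainer: sample-blob-container
            archiveNameFormat: "{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}"
            storageAccount:
              resourceGroup: sample-resource-group
              accountName: samplestorageacct  # 3-24 lowercase alphanumerics per the Pattern marker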
Context("Create API", func() { - It("should create an object successfully", func() { + It("should create an object successfully (without Capture)", func() { + key = types.NamespacedName{ + Name: "foo", + Namespace: "default", + } + + created = createEventHub(CaptureDescription{}) + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &Eventhub{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + It("should create an object successfully (with Capture)", func() { key = types.NamespacedName{ Name: "foo", Namespace: "default", } - created = &Eventhub{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: EventhubSpec{ - Location: "westus", - Namespace: "fooeventhubNamespaceName", - ResourceGroup: "fooresourceGroupName", - Properties: EventhubProperties{ - MessageRetentionInDays: 7, - PartitionCount: 1, + + capture := CaptureDescription{ + Destination: Destination{ + ArchiveNameFormat: "{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}", + BlobContainer: "foo-blob-container", + Name: "EventHubArchive.AzureBlockBlob", + StorageAccount: StorageAccount{ + ResourceGroup: "foo-resource-group", + AccountName: "fooaccountname", }, - }} + }, + Enabled: true, + SizeLimitInBytes: 524288000, + IntervalInSeconds: 90, + } + + created = createEventHub(capture) By("creating an API obj") Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) diff --git a/api/v1/eventhubnamespace_types.go b/api/v1/eventhubnamespace_types.go index e2687c8243d..3ebc7db3e31 100644 --- a/api/v1/eventhubnamespace_types.go +++ b/api/v1/eventhubnamespace_types.go @@ -30,7 +30,7 @@ type EventhubNamespaceSpec struct { Location string `json:"location"` Sku EventhubNamespaceSku `json:"sku,omitempty"` Properties EventhubNamespaceProperties `json:"properties,omitempty"` - ResourceGroup string `json:"resourcegroup,omitempty"` + ResourceGroup string `json:"resourceGroup,omitempty"` } // EventhubNamespaceStatus defines the observed state of EventhubNamespace @@ -70,9 +70,9 @@ type EventhubNamespaceSku struct { //EventhubNamespaceProperties defines the namespace properties type EventhubNamespaceProperties struct { - IsAutoInflateEnabled bool `json:"isautoinflateenabled,omitempty"` - MaximumThroughputUnits int32 `json:"maximumthroughputunits,omitempty"` - KafkaEnabled bool `json:"kafkaenabled,omitempty"` + IsAutoInflateEnabled bool `json:"isAutoInflateEnabled,omitempty"` + MaximumThroughputUnits int32 `json:"maximumThroughputUnits,omitempty"` + KafkaEnabled bool `json:"kafkaEnabled,omitempty"` } func init() { diff --git a/api/v1/eventhubnamespace_types_test.go b/api/v1/eventhubnamespace_types_test.go index 73323527c34..b69871198d9 100644 --- a/api/v1/eventhubnamespace_types_test.go +++ b/api/v1/eventhubnamespace_types_test.go @@ -59,7 +59,7 @@ var _ = Describe("EventhubNamespace", func() { Namespace: "default", }, Spec: EventhubNamespaceSpec{ - Location: "westus", + Location: resourcegroupLocation, ResourceGroup: "bar", }} diff --git a/api/v1/rediscache_types.go b/api/v1/rediscache_types.go new file mode 100644 index 00000000000..826d42ed068 --- /dev/null +++ b/api/v1/rediscache_types.go @@ -0,0 +1,131 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// RedisCacheSpec defines the desired state of RedisCache +type RedisCacheSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + Properties RedisCacheProperties `json:"properties,omitempty"` +} + +// RedisCacheProperties the properties of the Redis Cache. +type RedisCacheProperties struct { + Sku RedisCacheSku `json:"sku,omitempty"` + + EnableNonSslPort bool `json:"enableNonSslPort,omitempty"` +} + +// RedisCacheSku the SKU of the Redis Cache. +type RedisCacheSku struct { + // Name - The SKU name. Required for account creation; optional for update. 
+ // Possible values include: 'Basic', 'Standard', 'Premium' + Name RedisCacheSkuName `json:"name,omitempty"` + + Family RedisCacheSkuFamily `json:"family,omitempty"` + + Capacity int32 `json:"capacity,omitempty"` +} + +type RedisCacheSkuName string + +const ( + Basic RedisCacheSkuName = "Basic" + Premium RedisCacheSkuName = "Premium" + Standard RedisCacheSkuName = "Standard" +) + +type RedisCacheSkuFamily string + +const ( + C RedisCacheSkuFamily = "C" + P RedisCacheSkuFamily = "P" +) + +// RedisCacheStatus defines the observed state of RedisCache +type RedisCacheStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` +} + +type RedisCacheOutput struct { + RedisCacheName string `json:"redisCacheName,omitempty"` + PrimaryKey string `json:"primaryKey,omitempty"` + SecondaryKey string `json:"secondaryKey,omitempty"` +} + +// RedisCacheAdditionalResources holds the additional resources +type RedisCacheAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RedisCache is the Schema for the rediscaches API +type RedisCache struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RedisCacheSpec `json:"spec,omitempty"` + Status RedisCacheStatus `json:"status,omitempty"` + Output RedisCacheOutput `json:"output,omitempty"` + AdditionalResources RedisCacheAdditionalResources `json:"additionalResources,omitempty"` +} + +// +kubebuilder:object:root=true + +// RedisCacheList contains a list of RedisCache +type RedisCacheList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RedisCache `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RedisCache{}, &RedisCacheList{}) +} + +func (redisCache *RedisCache) IsSubmitted() bool { + return redisCache.Status.Provisioning || redisCache.Status.Provisioned +} diff --git a/api/v1/resourcegroup_types_test.go b/api/v1/resourcegroup_types_test.go index 235b56823bb..931cb9113c9 100644 --- a/api/v1/resourcegroup_types_test.go +++ b/api/v1/resourcegroup_types_test.go @@ -59,7 +59,7 @@ var _ = Describe("ResourceGroup", func() { Namespace: "default", }, Spec: ResourceGroupSpec{ - Location: "westus", + Location: resourcegroupLocation, }} By("creating an API obj") diff --git a/api/v1/storage_types.go b/api/v1/storage_types.go new file mode 100644 index 00000000000..4a32f9f737b --- /dev/null +++ b/api/v1/storage_types.go @@ -0,0 +1,136 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// StorageSpec defines the desired state of Storage +type StorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + + Sku StorageSku `json:"sku,omitempty"` + + Kind StorageKind `json:"kind,omitempty"` + + AccessTier StorageAccessTier `json:"accessTier,omitempty"` + + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` +} + +// StorageSku the SKU of the storage account. +type StorageSku struct { + // Name - The SKU name. Required for account creation; optional for update. + // Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS' + Name StorageSkuName `json:"name,omitempty"` +} + +// StorageSkuName enumerates the values for sku name. +// Only one of the following sku names may be specified. +// +kubebuilder:validation:Enum=Premium_LRS;Premium_ZRS;Standard_GRS;Standard_GZRS;Standard_LRS;Standard_RAGRS;Standard_RAGZRS;Standard_ZRS +type StorageSkuName string + +// StorageKind enumerates the values for kind. +// Only one of the following kinds may be specified. +// If none of the following kinds is specified, the default one +// is StorageV2. +// +kubebuilder:validation:Enum=BlobStorage;BlockBlobStorage;FileStorage;Storage;StorageV2 +type StorageKind string + +// AccessTier enumerates the values for access tier. +// Only one of the following access tiers may be specified. +// If none of the following access tiers is specified, the default one +// is Hot.
+// +kubebuilder:validation:Enum=Cool;Hot +type StorageAccessTier string + +// StorageStatus defines the observed state of Storage +type StorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Storage is the Schema for the storages API +type Storage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StorageSpec `json:"spec,omitempty"` + Status StorageStatus `json:"status,omitempty"` + Output StorageOutput `json:"output,omitempty"` + AdditionalResources StorageAdditionalResources `json:"additionalResources,omitempty"` +} + +type StorageOutput struct { + StorageAccountName string `json:"storageAccountName,omitempty"` + Key1 string `json:"key1,omitempty"` + Key2 string `json:"key2,omitempty"` + ConnectionString1 string `json:"connectionString1,omitempty"` + ConnectionString2 string `json:"connectionString2,omitempty"` +} + +// StorageAdditionalResources holds the additional resources +type StorageAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StorageList contains a list of Storage +type StorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Storage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Storage{}, &StorageList{}) +} + +func (storage *Storage) IsSubmitted() bool { + return storage.Status.Provisioning || storage.Status.Provisioned +} diff --git a/api/v1/suite_test.go b/api/v1/suite_test.go index e4d5357f792..61223b29b96 100644 --- a/api/v1/suite_test.go +++ b/api/v1/suite_test.go @@ -22,6 +22,8 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + resourcemanagerconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -36,6 +38,7 @@ import ( var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment +var resourcegroupLocation string func TestAPIs(t *testing.T) { t.Parallel() @@ -54,6 +57,9 @@ var _ = BeforeSuite(func(done Done) { CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, } + resourcemanagerconfig.ParseEnvironment() + resourcegroupLocation = resourcemanagerconfig.DefaultLocation() + err := SchemeBuilder.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index a1c583ca105..203e7b8e13b 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -23,6 +23,22 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureDescription) DeepCopyInto(out *CaptureDescription) { + *out = *in + out.Destination = in.Destination +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureDescription. 
+func (in *CaptureDescription) DeepCopy() *CaptureDescription { + if in == nil { + return nil + } + out := new(CaptureDescription) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConsumerGroup) DeepCopyInto(out *ConsumerGroup) { *out = *in @@ -112,6 +128,164 @@ func (in *ConsumerGroupStatus) DeepCopy() *ConsumerGroupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDB) DeepCopyInto(out *CosmosDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDB. +func (in *CosmosDB) DeepCopy() *CosmosDB { + if in == nil { + return nil + } + out := new(CosmosDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBAdditionalResources) DeepCopyInto(out *CosmosDBAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBAdditionalResources. +func (in *CosmosDBAdditionalResources) DeepCopy() *CosmosDBAdditionalResources { + if in == nil { + return nil + } + out := new(CosmosDBAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBList) DeepCopyInto(out *CosmosDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CosmosDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBList. +func (in *CosmosDBList) DeepCopy() *CosmosDBList { + if in == nil { + return nil + } + out := new(CosmosDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBOutput) DeepCopyInto(out *CosmosDBOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBOutput. +func (in *CosmosDBOutput) DeepCopy() *CosmosDBOutput { + if in == nil { + return nil + } + out := new(CosmosDBOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CosmosDBProperties) DeepCopyInto(out *CosmosDBProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBProperties. +func (in *CosmosDBProperties) DeepCopy() *CosmosDBProperties { + if in == nil { + return nil + } + out := new(CosmosDBProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBSpec) DeepCopyInto(out *CosmosDBSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBSpec. +func (in *CosmosDBSpec) DeepCopy() *CosmosDBSpec { + if in == nil { + return nil + } + out := new(CosmosDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBStatus) DeepCopyInto(out *CosmosDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBStatus. +func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { + if in == nil { + return nil + } + out := new(CosmosDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + out.StorageAccount = in.StorageAccount +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eventhub) DeepCopyInto(out *Eventhub) { *out = *in @@ -332,6 +506,7 @@ func (in *EventhubNamespaceStatus) DeepCopy() *EventhubNamespaceStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventhubProperties) DeepCopyInto(out *EventhubProperties) { *out = *in + out.CaptureDescription = in.CaptureDescription } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventhubProperties. @@ -465,6 +640,164 @@ func (in *KeyVaultStatus) DeepCopy() *KeyVaultStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCache) DeepCopyInto(out *RedisCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCache. +func (in *RedisCache) DeepCopy() *RedisCache { + if in == nil { + return nil + } + out := new(RedisCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCacheAdditionalResources) DeepCopyInto(out *RedisCacheAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheAdditionalResources. +func (in *RedisCacheAdditionalResources) DeepCopy() *RedisCacheAdditionalResources { + if in == nil { + return nil + } + out := new(RedisCacheAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheList) DeepCopyInto(out *RedisCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheList. +func (in *RedisCacheList) DeepCopy() *RedisCacheList { + if in == nil { + return nil + } + out := new(RedisCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheOutput) DeepCopyInto(out *RedisCacheOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheOutput. +func (in *RedisCacheOutput) DeepCopy() *RedisCacheOutput { + if in == nil { + return nil + } + out := new(RedisCacheOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheProperties) DeepCopyInto(out *RedisCacheProperties) { + *out = *in + out.Sku = in.Sku +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheProperties. +func (in *RedisCacheProperties) DeepCopy() *RedisCacheProperties { + if in == nil { + return nil + } + out := new(RedisCacheProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSku) DeepCopyInto(out *RedisCacheSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSku. +func (in *RedisCacheSku) DeepCopy() *RedisCacheSku { + if in == nil { + return nil + } + out := new(RedisCacheSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSpec) DeepCopyInto(out *RedisCacheSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSpec. +func (in *RedisCacheSpec) DeepCopy() *RedisCacheSpec { + if in == nil { + return nil + } + out := new(RedisCacheSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCacheStatus) DeepCopyInto(out *RedisCacheStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheStatus. +func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { + if in == nil { + return nil + } + out := new(RedisCacheStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceGroup) DeepCopyInto(out *ResourceGroup) { *out = *in @@ -820,3 +1153,165 @@ func (in *SqlServerStatus) DeepCopy() *SqlServerStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccount) DeepCopyInto(out *StorageAccount) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccount. +func (in *StorageAccount) DeepCopy() *StorageAccount { + if in == nil { + return nil + } + out := new(StorageAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAdditionalResources) DeepCopyInto(out *StorageAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAdditionalResources. +func (in *StorageAdditionalResources) DeepCopy() *StorageAdditionalResources { + if in == nil { + return nil + } + out := new(StorageAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOutput) DeepCopyInto(out *StorageOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOutput. +func (in *StorageOutput) DeepCopy() *StorageOutput { + if in == nil { + return nil + } + out := new(StorageOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSku) DeepCopyInto(out *StorageSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSku. +func (in *StorageSku) DeepCopy() *StorageSku { + if in == nil { + return nil + } + out := new(StorageSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + out.Sku = in.Sku + if in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. +func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml b/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml new file mode 100644 index 00000000000..ed4c39e607a --- /dev/null +++ b/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml @@ -0,0 +1,465 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: cosmosdbs.azure.microsoft.com +spec: + group: azure.microsoft.com + names: + kind: CosmosDB + plural: cosmosdbs + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + description: CosmosDB is the Schema for the cosmosdbs API + properties: + additionalResources: + description: CosmosDBAdditionalResources holds the additional resources + properties: + secrets: + items: + type: string + type: array + type: object + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: ObjectMeta is metadata that all persisted resources must have, + which includes all objects users must create. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. 
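+              # NOTE (editor's illustration, not controller-gen output): the
+              # operators in this repo rely on finalizers to gate Azure-side
+              # cleanup before an object is removed. A reconciler might set
+              # something like the following; the finalizer name is hypothetical:
+              #
+              #   metadata:
+              #     finalizers:
+              #     - cosmosdb.finalizers.azure.com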
+ items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. + items: + description: Initializer is information about an initializer that + has not yet completed. + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. 
+ properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + description: StatusCause provides more information about + an api.Status failure, including cases when multiple + errors are encountered. + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. 
+ type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet and the + group version of the resource that the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. 
+ An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you + identify an owning object. An owning object must be in the same + namespace as the dependent, or be cluster-scoped, so there is no + namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. + It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + output: + properties: + cosmosDBName: + type: string + primaryMasterKey: + type: string + type: object + spec: + description: CosmosDBSpec defines the desired state of CosmosDB + properties: + kind: + description: CosmosDBKind enumerates the values for kind. Only one of + the following kinds may be specified. If none of the following kinds + is specified, the default one is GlobalDocumentDBKind. 
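+            # NOTE (editor's illustration, not controller-gen output): a minimal
+            # CosmosDB manifest against this schema; all names and values are
+            # hypothetical.
+            #
+            #   apiVersion: azure.microsoft.com/v1
+            #   kind: CosmosDB
+            #   metadata:
+            #     name: example-cosmosdb
+            #   spec:
+            #     kind: GlobalDocumentDB
+            #     location: westus
+            #     resourceGroup: example-rg
+            #     properties:
+            #       databaseAccountOfferType: Standard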
+              enum:
+              - GlobalDocumentDB
+              - MongoDB
+              type: string
+            location:
+              minLength: 0
+              type: string
+            properties:
+              description: CosmosDBProperties the properties of CosmosDB.
+              properties:
+                databaseAccountOfferType:
+                  description: CosmosDBDatabaseAccountOfferType - The offer type
+                    for the Cosmos DB database account.
+                  enum:
+                  - Standard
+                  type: string
+              type: object
+            resourceGroup:
+              type: string
+          required:
+          - resourceGroup
+          type: object
+        status:
+          description: CosmosDBStatus defines the observed state of CosmosDB
+          properties:
+            provisioned:
+              type: boolean
+            provisioning:
+              type: boolean
+          type: object
+      type: object
+  versions:
+  - name: v1
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/config/crd/bases/azure.microsoft.com_eventhubnamespaces.yaml b/config/crd/bases/azure.microsoft.com_eventhubnamespaces.yaml
index f05791e430e..346ff0b08cf 100644
--- a/config/crd/bases/azure.microsoft.com_eventhubnamespaces.yaml
+++ b/config/crd/bases/azure.microsoft.com_eventhubnamespaces.yaml
@@ -406,15 +406,15 @@ spec:
       properties:
         description: EventhubNamespaceProperties defines the namespace properties
         properties:
-          isautoinflateenabled:
+          isAutoInflateEnabled:
             type: boolean
-          kafkaenabled:
+          kafkaEnabled:
             type: boolean
-          maximumthroughputunits:
+          maximumThroughputUnits:
             format: int32
             type: integer
         type: object
-      resourcegroup:
+      resourceGroup:
         type: string
       sku:
         description: EventhubNamespaceSku defines the sku
diff --git a/config/crd/bases/azure.microsoft.com_eventhubs.yaml b/config/crd/bases/azure.microsoft.com_eventhubs.yaml
index 249dc37d014..c32d7775287 100644
--- a/config/crd/bases/azure.microsoft.com_eventhubs.yaml
+++ b/config/crd/bases/azure.microsoft.com_eventhubs.yaml
@@ -421,19 +421,80 @@ spec:
       properties:
         description: EventhubProperties defines the namespace properties
        properties:
-          messageretentionindays:
+          captureDescription:
+            description: CaptureDescription - Details specifying EventHub capture
+              to persistent storage
+            properties:
+              destination:
+                description: Destination - Resource id of the storage account
+                  to be used to create the blobs
+                properties:
+                  archiveNameFormat:
+                    description: ArchiveNameFormat - Blob naming convention for
+                      archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}.
+                      Here all the parameters (Namespace,EventHub .. etc) are
+                      mandatory irrespective of order
+                    type: string
+                  blobContainer:
+                    description: BlobContainer - Blob container Name
+                    type: string
+                  name:
+                    description: Name - Name for capture destination
+                    enum:
+                    - EventHubArchive.AzureBlockBlob
+                    - EventHubArchive.AzureDataLake
+                    type: string
+                  storageAccount:
+                    description: StorageAccount - Details of the storage account
+                    properties:
+                      accountName:
+                        description: AccountName - Name of the storage account
+                        maxLength: 24
+                        minLength: 3
+                        pattern: ^[a-z0-9]+$
+                        type: string
+                      resourceGroup:
+                        description: ResourceGroup - Name of the storage account
+                          resource group
+                        pattern: ^[-\w\._\(\)]+$
+                        type: string
+                    type: object
+                type: object
+              enabled:
+                description: Enabled - indicates whether capture is enabled
+                type: boolean
+              intervalInSeconds:
+                description: IntervalInSeconds - The time window allows you to
+                  set the frequency with which the capture to Azure Blobs will
+                  happen
+                format: int32
+                maximum: 900
+                minimum: 60
+                type: integer
+              sizeLimitInBytes:
+                description: SizeLimitInBytes - The size window defines the amount
+                  of data built up in your Event Hub before a capture operation
+                format: int32
+                maximum: 524288000
+                minimum: 10485760
+                type: integer
+            required:
+            - enabled
+            type: object
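+          # NOTE (editor's illustration, not controller-gen output): a capture
+          # block satisfying the schema above might look like this; the account
+          # and container names are hypothetical, and the interval and size
+          # values sit inside the declared minimum/maximum bounds.
+          #
+          #   captureDescription:
+          #     enabled: true
+          #     intervalInSeconds: 300
+          #     sizeLimitInBytes: 314572800
+          #     destination:
+          #       name: EventHubArchive.AzureBlockBlob
+          #       blobContainer: capture
+          #       storageAccount:
+          #         accountName: examplestorageacct
+          #         resourceGroup: example-rg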
+          messageRetentionInDays:
             description: MessageRetentionInDays - Number of days to retain the
               events for this Event Hub, value should be 1 to 7 days
             format: int32
             maximum: 7
             minimum: 1
             type: integer
-          partitioncount:
+          partitionCount:
+            description: PartitionCount - Number of partitions created for the
-              Event Hub, allowed values are from 1 to 32 partitions.
+              Event Hub, allowed values are from 2 to 32 partitions.
             format: int32
             maximum: 32
-            minimum: 1
+            minimum: 2
             type: integer
         type: object
       resourcegroup:
diff --git a/config/crd/bases/azure.microsoft.com_rediscaches.yaml b/config/crd/bases/azure.microsoft.com_rediscaches.yaml
new file mode 100644
index 00000000000..ede9cc107fa
--- /dev/null
+++ b/config/crd/bases/azure.microsoft.com_rediscaches.yaml
@@ -0,0 +1,470 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: rediscaches.azure.microsoft.com
+spec:
+  group: azure.microsoft.com
+  names:
+    kind: RedisCache
+    plural: rediscaches
+  scope: ""
+  subresources:
+    status: {}
+  validation:
+    openAPIV3Schema:
+      description: RedisCache is the Schema for the rediscaches API
+      properties:
+        additionalResources:
+          description: RedisCacheAdditionalResources holds the additional resources
+          properties:
+            secrets:
+              items:
+                type: string
+              type: array
+          type: object
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          description: ObjectMeta is metadata that all persisted resources must have,
+            which includes all objects users must create.
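+          # NOTE (editor's illustration, not controller-gen output): a minimal
+          # RedisCache manifest against this schema; all names and values are
+          # hypothetical. Sku family C with capacity 0 corresponds to the
+          # smallest Basic-tier cache size.
+          #
+          #   apiVersion: azure.microsoft.com/v1
+          #   kind: RedisCache
+          #   metadata:
+          #     name: example-rediscache
+          #   spec:
+          #     location: westus
+          #     resourceGroup: example-rg
+          #     properties:
+          #       enableNonSslPort: true
+          #       sku:
+          #         name: Basic
+          #         family: C
+          #         capacity: 0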
+ properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. 
The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. + items: + description: Initializer is information about an initializer that + has not yet completed. + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. + properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + description: StatusCause provides more information about + an api.Status failure, including cases when multiple + errors are encountered. + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. 
+ May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. 
+ type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet and the + group version of the resource that the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. 
+ If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you + identify an owning object. An owning object must be in the same + namespace as the dependent, or be cluster-scoped, so there is no + namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. + It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + output: + properties: + primaryKey: + type: string + redisCacheName: + type: string + secondaryKey: + type: string + type: object + spec: + description: RedisCacheSpec defines the desired state of RedisCache + properties: + location: + minLength: 0 + type: string + properties: + description: RedisCacheProperties the properties of the Redis Cache. + properties: + enableNonSslPort: + type: boolean + sku: + description: RedisCacheSku the SKU of the Redis Cache. + properties: + capacity: + format: int32 + type: integer + family: + type: string + name: + description: 'Name - The SKU name. Required for account creation; + optional for update. 
+                        Possible values include: ''Basic'', ''Standard'', ''Premium'''
+                      type: string
+                  type: object
+              type: object
+            resourceGroup:
+              type: string
+          required:
+          - resourceGroup
+          type: object
+        status:
+          description: RedisCacheStatus defines the observed state of RedisCache
+          properties:
+            provisioned:
+              type: boolean
+            provisioning:
+              type: boolean
+          type: object
+      type: object
+  versions:
+  - name: v1
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/config/crd/bases/azure.microsoft.com_storages.yaml b/config/crd/bases/azure.microsoft.com_storages.yaml
new file mode 100644
index 00000000000..36bd1ce2c1f
--- /dev/null
+++ b/config/crd/bases/azure.microsoft.com_storages.yaml
@@ -0,0 +1,493 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: storages.azure.microsoft.com
+spec:
+  group: azure.microsoft.com
+  names:
+    kind: Storage
+    plural: storages
+  scope: ""
+  subresources:
+    status: {}
+  validation:
+    openAPIV3Schema:
+      description: Storage is the Schema for the storages API
+      properties:
+        additionalResources:
+          description: StorageAdditionalResources holds the additional resources
+          properties:
+            secrets:
+              items:
+                type: string
+              type: array
+          type: object
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          description: ObjectMeta is metadata that all persisted resources must have,
+            which includes all objects users must create.
+          properties:
+            annotations:
+              additionalProperties:
+                type: string
+              description: 'Annotations is an unstructured key value map stored with
+                a resource that may be set by external tools to store and retrieve
+                arbitrary metadata. They are not queryable and should be preserved
+                when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
+              type: object
+            clusterName:
+              description: The name of the cluster which the object belongs to. This
+                is used to distinguish resources with same name and namespace in different
+                clusters. This field is not set anywhere right now and apiserver is
+                going to ignore it if set in create or update request.
+              type: string
+            creationTimestamp:
+              description: "CreationTimestamp is a timestamp representing the server
+                time when this object was created. It is not guaranteed to be set
+                in happens-before order across separate operations. Clients may not
+                set this value. It is represented in RFC3339 form and is in UTC. \n
+                Populated by the system. Read-only. Null for lists.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. 
If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. + items: + description: Initializer is information about an initializer that + has not yet completed. + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. + properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + description: StatusCause provides more information about + an api.Status failure, including cases when multiple + errors are encountered. + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. 
This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet and the + group version of the resource that the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you + identify an owning object. An owning object must be in the same + namespace as the dependent, or be cluster-scoped, so there is no + namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. + It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + output: + properties: + connectionString1: + type: string + connectionString2: + type: string + key1: + type: string + key2: + type: string + storageAccountName: + type: string + type: object + spec: + description: StorageSpec defines the desired state of Storage + properties: + accessTier: + description: AccessTier enumerates the values for access tier. Only + one of the following access tiers may be specified. If none of the + following access tiers is specified, the default one is Hot. + enum: + - Cool + - Hot + type: string + kind: + description: StorageKind enumerates the values for kind. Only one of + the following kinds may be specified. If none of the following kinds + is specified, the default one is StorageV2. + enum: + - BlobStorage + - BlockBlobStorage + - FileStorage + - Storage + - StorageV2 + type: string + location: + minLength: 0 + type: string + resourceGroup: + type: string + sku: + description: Sku the SKU of the storage account. + properties: + name: + description: 'Name - The SKU name. Required for account creation; + optional for update. 
Possible values include: ''StandardLRS'', + ''StandardGRS'', ''StandardRAGRS'', ''StandardZRS'', ''PremiumLRS'', + ''PremiumZRS'', ''StandardGZRS'', ''StandardRAGZRS''' + enum: + - Premium_LRS + - Premium_ZRS + - Standard_GRS + - Standard_GZRS + - Standard_LRS + - Standard_RAGRS + - Standard_RAGZRS + - Standard_ZRS + type: string + type: object + supportsHttpsTrafficOnly: + type: boolean + required: + - resourceGroup + type: object + status: + description: StorageStatus defines the observed state of Storage + properties: + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean + type: object + type: object + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 24e4d6bcd14..75e2efb5492 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,6 +2,9 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: +- bases/azure.microsoft.com_storages.yaml +- bases/azure.microsoft.com_cosmosdbs.yaml +- bases/azure.microsoft.com_rediscaches.yaml - bases/azure.microsoft.com_eventhubs.yaml - bases/azure.microsoft.com_resourcegroups.yaml - bases/azure.microsoft.com_eventhubnamespaces.yaml diff --git a/config/crd/patches/cainjection_in_cosmosdbs.yaml b/config/crd/patches/cainjection_in_cosmosdbs.yaml new file mode 100644 index 00000000000..d46088c8c24 --- /dev/null +++ b/config/crd/patches/cainjection_in_cosmosdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + name: cosmosdbs.azure.microsoft.com diff --git a/config/crd/patches/cainjection_in_rediscaches.yaml b/config/crd/patches/cainjection_in_rediscaches.yaml new file mode 100644 index 00000000000..0945e3cbe00 --- /dev/null +++ b/config/crd/patches/cainjection_in_rediscaches.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: rediscaches.azure.microsoft.com diff --git a/config/crd/patches/cainjection_in_storages.yaml b/config/crd/patches/cainjection_in_storages.yaml new file mode 100644 index 00000000000..a5669e97b77 --- /dev/null +++ b/config/crd/patches/cainjection_in_storages.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
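+# $(NAMESPACE) and $(CERTIFICATENAME) are assumed to be kustomize variables, expected to be substituted by the config/default kustomization with the namespace and name of the cert-manager Certificate whose CA should be injected.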
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + name: storages.azure.microsoft.com diff --git a/config/crd/patches/webhook_in_cosmosdbs.yaml b/config/crd/patches/webhook_in_cosmosdbs.yaml new file mode 100644 index 00000000000..d06ad93537e --- /dev/null +++ b/config/crd/patches/webhook_in_cosmosdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cosmosdbs.azure.microsoft.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_rediscaches.yaml b/config/crd/patches/webhook_in_rediscaches.yaml new file mode 100644 index 00000000000..21f9b0e36d9 --- /dev/null +++ b/config/crd/patches/webhook_in_rediscaches.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rediscaches.azure.microsoft.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_storages.yaml b/config/crd/patches/webhook_in_storages.yaml new file mode 100644 index 00000000000..51d315b7a6e --- /dev/null +++ b/config/crd/patches/webhook_in_storages.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
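+# The caBundle placeholder used below (Cg==) is the base64 encoding of a single newline character, which is why it passes the apiserver's non-empty check until cert-manager injects the real CA bundle.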
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: storages.azure.microsoft.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/default/manager_image_patch.yaml-e b/config/default/manager_image_patch.yaml-e deleted file mode 100644 index 6e060cb4438..00000000000 --- a/config/default/manager_image_patch.yaml-e +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - # Change the value of image field below to your controller image URL - - image: IMAGE_URL - name: manager - env: - - name: AZURE_CLIENT_ID - valueFrom: - secretKeyRef: - name: azureoperatorsettings - key: AZURE_CLIENT_ID - - name: AZURE_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: azureoperatorsettings - key: AZURE_CLIENT_SECRET - - name: AZURE_TENANT_ID - valueFrom: - secretKeyRef: - name: azureoperatorsettings - key: AZURE_TENANT_ID - - name: AZURE_SUBSCRIPTION_ID - valueFrom: - secretKeyRef: - name: azureoperatorsettings - key: AZURE_SUBSCRIPTION_ID - #requeue after time in seconds" - - name: REQUEUE_AFTER - value: "30" diff --git a/config/default/manager_role_patch.yaml b/config/default/manager_role_patch.yaml new file mode 100644 index 00000000000..958bbcc3a46 --- /dev/null +++ b/config/default/manager_role_patch.yaml @@ -0,0 +1,80 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - azure.microsoft.com + resources: + - cosmosdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - azure.microsoft.com + resources: + - cosmosdbs/status + verbs: + - get + - patch + - update +- apiGroups: + - azure.microsoft.com + resources: + - rediscaches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - azure.microsoft.com + resources: + - rediscaches/status + verbs: + - get + - patch + - update +- apiGroups: + - azure.microsoft.com + resources: + - storages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - azure.microsoft.com + resources: + - storages/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 15b0c5182e6..df285ac3214 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -9,8 +9,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - keyvaults - - consumergroups + - eventhubs verbs: - create - delete @@ -22,28 +21,27 @@ rules: - apiGroups: - azure.microsoft.com resources: - - sqldatabases/status + - sqlservers verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - apps + - azure.microsoft.com resources: - - deployments + - storages/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - - "" + - azure.microsoft.com resources: - - secrets - - resourcegroups/status + - sqlservers/status verbs: - get - patch @@ 
-51,7 +49,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - consumergroups + - cosmosdbs verbs: - create - delete @@ -63,16 +61,15 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubs/status + - resourcegroups/status verbs: - get - patch - update - apiGroups: - - azure.microsoft.com - "" resources: - - eventhubnamespaces + - secrets verbs: - create - delete @@ -82,14 +79,16 @@ rules: - update - watch - apiGroups: - - azure.microsoft.com - resources: - - eventhubnamespaces/status - - "" + - apps resources: - - events + - deployments verbs: - create + - delete + - get + - list + - patch + - update - watch - apiGroups: - apps @@ -102,7 +101,14 @@ rules: - apiGroups: - azure.microsoft.com resources: - - keyvaults/status + - cosmosdbs/status + verbs: + - get + - patch + - update +- apiGroups: + - azure.microsoft.com + resources: - eventhubnamespaces/status verbs: - get @@ -111,7 +117,6 @@ rules: - apiGroups: - azure.microsoft.com resources: - - sqldatabases - keyvaults verbs: - create @@ -124,16 +129,12 @@ rules: - apiGroups: - azure.microsoft.com resources: - - sqlservers/status - - keyvaults/status + - rediscaches/status verbs: - get - patch - update - apiGroups: - - apps - resources: - - deployments/status - azure.microsoft.com resources: - resourcegroups @@ -148,7 +149,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - sqlservers + - sqldatabases verbs: - create - delete @@ -160,22 +161,27 @@ rules: - apiGroups: - azure.microsoft.com resources: - - events + - sqldatabases/status verbs: - - create + - get - patch + - update - apiGroups: - azure.microsoft.com resources: - - eventhubs/status + - storages verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - azure.microsoft.com resources: - - sqldatabases + - consumergroups verbs: - create - delete @@ -187,42 +193,53 @@ rules: - apiGroups: - azure.microsoft.com resources: - - sqlservers/status + - eventhubnamespaces verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - azure.microsoft.com resources: - - consumergroups/status + - sqlfirewallrules verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - azure.microsoft.com resources: - - eventhubs + - sqlfirewallrules/status verbs: - - create - - delete - get - - list - patch - update - - watch +- apiGroups: + - azure.microsoft.com + resources: + - events + verbs: + - create + - patch - apiGroups: - "" resources: - - eventhubnamespaces + - events verbs: - create - watch - apiGroups: - azure.microsoft.com resources: - - consumergroups/status + - keyvaults/status verbs: - get - patch @@ -230,7 +247,19 @@ rules: - apiGroups: - azure.microsoft.com resources: - - sqldatabases/status + - rediscaches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - azure.microsoft.com + resources: + - consumergroups/status verbs: - get - patch @@ -238,12 +267,8 @@ rules: - apiGroups: - azure.microsoft.com resources: - - deployments + - eventhubs/status verbs: - - create - - delete - get - - list - patch - update - - watch diff --git a/config/samples/.gitkeep b/config/samples/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/config/samples/azure_v1_cosmosdb.yaml b/config/samples/azure_v1_cosmosdb.yaml new file mode 100644 index 00000000000..0741c205517 --- /dev/null +++ b/config/samples/azure_v1_cosmosdb.yaml @@ -0,0 +1,10 @@ +apiVersion: azure.microsoft.com/v1 +kind: CosmosDB +metadata: + name: cosmosdb-sample1908xyzkj 
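+  # Cosmos DB account names must be globally unique (they form part of a DNS name), hence the randomized suffix above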
+spec: + kind: GlobalDocumentDB + location: westus + resourceGroup: resourcegroup-sample-1908 + properties: + databaseAccountOfferType: Standard diff --git a/config/samples/azure_v1_eventhub.yaml b/config/samples/azure_v1_eventhub.yaml index 89f98afc3b1..5c57b014ab1 100644 --- a/config/samples/azure_v1_eventhub.yaml +++ b/config/samples/azure_v1_eventhub.yaml @@ -4,12 +4,12 @@ metadata: name: eventhub-sample-2307-09 spec: location: "westus" - resourcegroup: "resourcegroup-sample-1907" + resourceGroup: "resourcegroup-sample-1907" namespace: "eventhubnamespace-sample-6" properties: - messageretentionindays: 7 - partitioncount: 1 - authorizationrule: + messageRetentionInDays: 7 + partitionCount: 2 + authorizationRule: name : "RootManageSharedAccessKey" rights: - "Listen" diff --git a/config/samples/azure_v1_eventhub_capture.yaml b/config/samples/azure_v1_eventhub_capture.yaml new file mode 100644 index 00000000000..a074c643e2e --- /dev/null +++ b/config/samples/azure_v1_eventhub_capture.yaml @@ -0,0 +1,29 @@ +apiVersion: azure.microsoft.com/v1 +kind: Eventhub +metadata: + name: eventhub-sample-2307-10-capture +spec: + location: "westus" + resourceGroup: "resourcegroup-sample-1907" + namespace: "eventhubnamespace-sample-6" + properties: + messageRetentionInDays: 7 + partitionCount: 2 + captureDescription: + # Note that the storage account and blob container below must already exist + destination: + archiveNameFormat: "{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}" + blobContainer: "capturecontainer" + name: "EventHubArchive.AzureBlockBlob" + storageAccount: + resourceGroup: "my-resource-group" + accountName: "storageaccountauv1" + enabled: true + sizeLimitInBytes: 524288000 + intervalInSeconds: 120 + authorizationRule: + name : "RootManageSharedAccessKey" + rights: + - "Listen" + - "Manage" + - "Send" diff --git a/config/samples/azure_v1_eventhubnamespace.yaml b/config/samples/azure_v1_eventhubnamespace.yaml index dbb1af30ab5..83b61d10c53 100644 --- a/config/samples/azure_v1_eventhubnamespace.yaml +++ b/config/samples/azure_v1_eventhubnamespace.yaml @@ -4,12 +4,12 @@ metadata: name: eventhubnamespace-sample-6 spec: location: "westus" - resourcegroup: "resourcegroup-sample-1907" + resourceGroup: "resourcegroup-sample-1907" sku: name: "Standard" tier: "Standard" capacity: 1 properties: - isautoinflateenabled: false - maximumthroughputunits: 0 - kafkaenabled: false + isAutoInflateEnabled: false + maximumThroughputUnits: 0 + kafkaEnabled: false diff --git a/config/samples/azure_v1_rediscache.yaml b/config/samples/azure_v1_rediscache.yaml new file mode 100644 index 00000000000..a78959b3dfd --- /dev/null +++ b/config/samples/azure_v1_rediscache.yaml @@ -0,0 +1,13 @@ +apiVersion: azure.microsoft.com/v1 +kind: RedisCache +metadata: + name: rediscache-sample1908xyzkj +spec: + location: westus + resourceGroup: resourcegroup-sample-1908 + properties: + sku: + name: Basic + family: C + capacity: 1 + enableNonSslPort: true diff --git a/config/samples/azure_v1_storage.yaml b/config/samples/azure_v1_storage.yaml new file mode 100644 index 00000000000..88a1d9c1671 --- /dev/null +++ b/config/samples/azure_v1_storage.yaml @@ -0,0 +1,12 @@ +apiVersion: azure.microsoft.com/v1 +kind: Storage +metadata: + name: storagesample1908xyzkj +spec: + location: westus + resourceGroup: resourcegroup-sample-1908 + sku: + name: Standard_RAGRS + kind: StorageV2 + accessTier: Hot + supportsHttpsTrafficOnly: true diff --git a/controllers/consumergroup_controller_test.go 
b/controllers/consumergroup_controller_test.go index 21e82883515..ca457e6395b 100644 --- a/controllers/consumergroup_controller_test.go +++ b/controllers/consumergroup_controller_test.go @@ -43,7 +43,6 @@ var _ = Describe("ConsumerGroup Controller", func() { rgName = resourceGroupName ehnName = eventhubNamespaceName ehName = eventhubName - }) AfterEach(func() { diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go new file mode 100644 index 00000000000..4691d0619df --- /dev/null +++ b/controllers/cosmosdb_controller.go @@ -0,0 +1,259 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" + "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/cosmosdbs" + "k8s.io/client-go/tools/record" +) + +const cosmosDBFinalizerName = "cosmosdb.finalizers.azure.com" + +// CosmosDBReconciler reconciles a CosmosDB object +type CosmosDBReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration +} + +// +kubebuilder:rbac:groups=azure.microsoft.com,resources=cosmosdbs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=azure.microsoft.com,resources=cosmosdbs/status,verbs=get;update;patch + +// Reconcile function does the main reconciliation loop of the operator +func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + // Fetch the CosmosDB instance + var instance azurev1.CosmosDB + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) + if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + log.Error(err, "unable to fetch CosmosDB") + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. 
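+		// client.IgnoreNotFound maps NotFound errors to nil, so the request is dropped here rather than requeued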
+ return ctrl.Result{}, client.IgnoreNotFound(err) + } + log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) + log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Error", "Delete CosmosDB failed with ", err) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, cosmosDBFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Error", "Adding cosmosDB finalizer failed with ", err) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling cosmosdb in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "CosmosDB "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *CosmosDBReconciler) addFinalizer(instance *azurev1.CosmosDB) error { + helpers.AddFinalizer(instance, cosmosDBFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", cosmosDBFinalizerName)) + return nil +} + +func (r *CosmosDBReconciler) reconcileExternal(instance *azurev1.CosmosDB) error { + ctx := context.Background() + location := instance.Spec.Location + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + kind := instance.Spec.Kind + dbType := instance.Spec.Properties.DatabaseAccountOfferType + + // write information back to instance + instance.Status.Provisioning = true + + if err := r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + _, err := cosmosdbs.CreateCosmosDB(ctx, groupName, name, location, kind, dbType, nil) + if err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning") + return err + } + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") + instance.Status.Provisioning = false + errUpdate := r.Status().Update(ctx, instance) + if errUpdate != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + return err + } + + instance.Status.Provisioning = false + instance.Status.Provisioned = true + + if err = r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + return nil +} + +func (r *CosmosDBReconciler) deleteExternal(instance *azurev1.CosmosDB) error { + ctx := context.Background() + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + _, err := cosmosdbs.DeleteCosmosDB(ctx, groupName, name) + if err != nil { 
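+		// a 204 (No Content) from the delete call means the account is already gone; treat it as success so the finalizer can still be removed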
+		if errhelp.IsStatusCode204(err) { + r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist") + return nil + } + + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resource in azure") + return err + } + + r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted") + return nil +} + +// SetupWithManager sets up the controller functions +func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.CosmosDB{}). + Complete(r) +} + +/* Below code was from prior to refactor. + Left here for future reference for pulling out values post deployment. + + +func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.CosmosDB, error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + resource := &servicev1alpha1.CosmosDB{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", resource.Namespace, "CosmosDB.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update CosmosDB status") + return nil, err + } + log.V(1).Info("Updated Status", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.Status", resourceCopy.Status) + + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.CosmosDBName = helpers.GetOutput(outputs, "cosmosDBName") + resourceCopy.Output.PrimaryMasterKey = helpers.GetOutput(outputs, "primaryMasterKey") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.V(1).Info("Updated additional resources", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.AdditionalResources", resourceCopy.AdditionalResources, "CosmosDB.Output", resourceCopy.Output) + } + + return resourceCopy, nil +} + +func (r *CosmosDBReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.CosmosDB) (err error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + secrets := []string{} + secretData := map[string]string{ + "cosmosDBName": "{{.Obj.Output.CosmosDBName}}", + "primaryMasterKey": "{{.Obj.Output.PrimaryMasterKey}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := s.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update CosmosDB status") + return err + } + + return nil +}*/ diff --git a/controllers/eventhub_controller.go b/controllers/eventhub_controller.go index 6712fa8b2ee..9b31540f5ea 100644 --- a/controllers/eventhub_controller.go +++ b/controllers/eventhub_controller.go @@ -20,6 +20,9 @@ import ( "fmt" "time" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "github.com/Azure/go-autorest/autorest/to" + model "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub" azurev1 "github.com/Azure/azure-service-operator/api/v1" "github.com/Azure/azure-service-operator/pkg/errhelp" @@
-124,6 +127,7 @@ func (r *EventhubReconciler) reconcileExternal(instance *azurev1.Eventhub) error resourcegroup := instance.Spec.ResourceGroup partitionCount := instance.Spec.Properties.PartitionCount messageRetentionInDays := instance.Spec.Properties.MessageRetentionInDays + captureDescription := instance.Spec.Properties.CaptureDescription secretName := instance.Spec.SecretName if secretName == "" { @@ -160,7 +164,9 @@ func (r *EventhubReconciler) reconcileExternal(instance *azurev1.Eventhub) error r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") } - _, err = eventhubsresourcemanager.CreateHub(ctx, resourcegroup, eventhubNamespace, eventhubName, messageRetentionInDays, partitionCount) + capturePtr := getCaptureDescriptionPtr(captureDescription) + + _, err = eventhubsresourcemanager.CreateHub(ctx, resourcegroup, eventhubNamespace, eventhubName, messageRetentionInDays, partitionCount, capturePtr) if err != nil { r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") instance.Status.Provisioning = false @@ -195,6 +201,35 @@ func (r *EventhubReconciler) reconcileExternal(instance *azurev1.Eventhub) error return nil } +const storageAccountResourceFmt = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s" + +func getCaptureDescriptionPtr(captureDescription azurev1.CaptureDescription) *model.CaptureDescription { + // add capture details + var capturePtr *model.CaptureDescription + + storage := captureDescription.Destination.StorageAccount + storageAccountResourceId := fmt.Sprintf(storageAccountResourceFmt, config.SubscriptionID(), storage.ResourceGroup, storage.AccountName) + + if captureDescription.Enabled { + capturePtr = &model.CaptureDescription{ + Enabled: to.BoolPtr(true), + Encoding: model.Avro, + IntervalInSeconds: &captureDescription.IntervalInSeconds, + SizeLimitInBytes: &captureDescription.SizeLimitInBytes, + Destination: &model.Destination{ + Name: &captureDescription.Destination.Name, + DestinationProperties: &model.DestinationProperties{ + StorageAccountResourceID: &storageAccountResourceId, + BlobContainer: &captureDescription.Destination.BlobContainer, + ArchiveNameFormat: &captureDescription.Destination.ArchiveNameFormat, + }, + }, + SkipEmptyArchives: to.BoolPtr(true), + } + } + return capturePtr +} + func (r *EventhubReconciler) deleteEventhub(instance *azurev1.Eventhub) error { ctx := context.Background() diff --git a/controllers/eventhub_controller_test.go b/controllers/eventhub_controller_test.go index ede19e66777..60ac097d810 100644 --- a/controllers/eventhub_controller_test.go +++ b/controllers/eventhub_controller_test.go @@ -190,7 +190,7 @@ var _ = Describe("EventHub Controller", func() { ResourceGroup: rgName, Properties: azurev1.EventhubProperties{ MessageRetentionInDays: 7, - PartitionCount: 1, + PartitionCount: 2, }, AuthorizationRule: azurev1.EventhubAuthorizationRule{ Name: "RootManageSharedAccessKey", diff --git a/controllers/eventhubnamespace_controller_test.go b/controllers/eventhubnamespace_controller_test.go index 83d6bcd932a..cb0ec6cfd68 100644 --- a/controllers/eventhubnamespace_controller_test.go +++ b/controllers/eventhubnamespace_controller_test.go @@ -39,7 +39,6 @@ var _ = Describe("EventHubNamespace Controller", func() { BeforeEach(func() { // Add any setup steps that needs to be executed before each test rgName = resourceGroupName - }) AfterEach(func() { @@ -83,7 +82,6 @@ var _ = Describe("EventHubNamespace Controller", func() { It("should validate resourcegroup 
exist before creating eventhubnamespaces", func() { - resourceGroupName := "t-rg-dev-eh-" + helpers.RandomString(10) eventhubNamespaceName := "t-ns-dev-eh-" + helpers.RandomString(10) // Create the EventHubNamespace object and expect the Reconcile to be created diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go new file mode 100644 index 00000000000..ff0e0808938 --- /dev/null +++ b/controllers/rediscache_controller.go @@ -0,0 +1,260 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" + "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/rediscaches" + "k8s.io/client-go/tools/record" +) + +const redisCacheFinalizerName = "rediscache.finalizers.azure.com" + +// RedisCacheReconciler reconciles a RedisCache object +type RedisCacheReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration +} + +// +kubebuilder:rbac:groups=azure.microsoft.com,resources=rediscaches,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=azure.microsoft.com,resources=rediscaches/status,verbs=get;update;patch + +func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("rediscache", req.NamespacedName) + + // Fetch the Redis Cache instance + var instance azurev1.RedisCache + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) + if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + log.Error(err, "unable to fetch RedisCache") + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. 
+ return ctrl.Result{}, client.IgnoreNotFound(err) + } + log.Info("Getting Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) + log.V(1).Info("Describing Redis Cache", "RedisCache", instance) + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, redisCacheFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Error", "Delete Redis Cache failed with ", err) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, redisCacheFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, redisCacheFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Error", "Adding redis cache finalizer failed with ", err) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling redis cache in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "RedisCache "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *RedisCacheReconciler) addFinalizer(instance *azurev1.RedisCache) error { + helpers.AddFinalizer(instance, redisCacheFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", redisCacheFinalizerName)) + return nil +} + +func (r *RedisCacheReconciler) reconcileExternal(instance *azurev1.RedisCache) error { + ctx := context.Background() + location := instance.Spec.Location + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + sku := instance.Spec.Properties.Sku + enableNonSSLPort := instance.Spec.Properties.EnableNonSslPort + + // write information back to instance + instance.Status.Provisioning = true + + if err := r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + _, err := rediscaches.CreateRedisCache(ctx, groupName, name, location, sku, enableNonSSLPort, nil) + if err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning") + return err + } + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") + instance.Status.Provisioning = false + errUpdate := r.Status().Update(ctx, instance) + if errUpdate != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + return err + } + + instance.Status.Provisioning = false + instance.Status.Provisioned = true + + if err = r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + return nil +} + +func (r *RedisCacheReconciler) deleteExternal(instance *azurev1.RedisCache) error { + ctx := context.Background() + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + _, err := 
rediscaches.DeleteRedisCache(ctx, groupName, name) + if err != nil { + if errhelp.IsStatusCode204(err) { + r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist") + return nil + } + + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resource in azure") + return err + } + + r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted") + return nil +} + +// SetupWithManager sets up the controller functions +func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.RedisCache{}). + Complete(r) +} + +/* Below code was from prior to refactor. +Left here for future reference for pulling out values post deployment. + +func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { + ctx := context.Background() + log := r.Log.WithValues("Redis Cache", req.NamespacedName) + + resource := &servicev1alpha1.RedisCache{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting Redis Cache", "RedisCache.Namespace", resource.Namespace, "RedisCache.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Redis Cache status") + return nil, err + } + log.V(1).Info("Updated Status", "Redis Cache.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.Status", resourceCopy.Status) + + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.RedisCacheName = helpers.GetOutput(outputs, "redisCacheName") + resourceCopy.Output.PrimaryKey = helpers.GetOutput(outputs, "primaryKey") + resourceCopy.Output.SecondaryKey = helpers.GetOutput(outputs, "secondaryKey") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.V(1).Info("Updated additional resources", "RedisCache.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.AdditionalResources", resourceCopy.AdditionalResources, "RedisCache.Output", resourceCopy.Output) + } + + return resourceCopy, nil +} + +func (r *RedisCacheReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.RedisCache) (err error) { + ctx := context.Background() + log := r.Log.WithValues("redisCache", req.NamespacedName) + + secrets := []string{} + secretData := map[string]string{ + "redisCacheName": "{{.Obj.Output.RedisCacheName}}", + "primaryKey": "{{.Obj.Output.PrimaryKey}}", + "secondaryKey": "{{.Obj.Output.SecondaryKey}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := s.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Redis Cache status") + return err + } + + return nil +} +*/ diff --git a/controllers/resourcegroup_controller_test.go b/controllers/resourcegroup_controller_test.go index 9a7f519c484..31488ede071 100644 --- a/controllers/resourcegroup_controller_test.go +++ b/controllers/resourcegroup_controller_test.go @@ -60,7 +60,7 @@ var _ = Describe("ResourceGroup Controller", func() { Namespace: "default", },
Spec: azurev1.ResourceGroupSpec{ - Location: "westus", + Location: resourcegroupLocation, }, } diff --git a/controllers/sqlserver_controller.go b/controllers/sqlserver_controller.go index 68133c91216..fc22fc10d4c 100644 --- a/controllers/sqlserver_controller.go +++ b/controllers/sqlserver_controller.go @@ -61,7 +61,7 @@ func (r *SqlServerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, SQLServerFinalizerName) { if err := r.deleteExternal(&instance); err != nil { - log.Info("Delete SqlServer failed with ", err.Error()) + log.Info("Delete SqlServer failed with ", "error", err.Error()) return ctrl.Result{}, err } diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go new file mode 100644 index 00000000000..d96ce920007 --- /dev/null +++ b/controllers/storage_controller.go @@ -0,0 +1,269 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" + helpers "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/storages" + "k8s.io/client-go/tools/record" +) + +const storageFinalizerName = "storage.finalizers.azure.com" + +// StorageReconciler reconciles a Storage object +type StorageReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration +} + +// +kubebuilder:rbac:groups=azure.microsoft.com,resources=storages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=azure.microsoft.com,resources=storages/status,verbs=get;update;patch + +// Reconcile function does the main reconciliation loop of the operator +func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + // Fetch the Storage instance + var instance azurev1.Storage + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) + if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + log.Error(err, "unable to retrieve storage resource", "err", err.Error()) + // 
we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. + //return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + log.Info("Getting Storage Account", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) + log.V(1).Info("Describing Storage Account", "Storage", instance) + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, storageFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Error", "Delete Storage failed with ", err) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, storageFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, storageFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Error", "Adding storage finalizer failed with ", err) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling storage in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "Storage "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *StorageReconciler) addFinalizer(instance *azurev1.Storage) error { + helpers.AddFinalizer(instance, storageFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", storageFinalizerName)) + return nil +} + +func (r *StorageReconciler) reconcileExternal(instance *azurev1.Storage) error { + ctx := context.Background() + location := instance.Spec.Location + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + sku := instance.Spec.Sku + kind := instance.Spec.Kind + accessTier := instance.Spec.AccessTier + enableHTTPSTrafficOnly := instance.Spec.EnableHTTPSTrafficOnly + + // write information back to instance + instance.Status.Provisioning = true + + if err := r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + _, err := storages.CreateStorage(ctx, groupName, name, location, sku, kind, nil, accessTier, enableHTTPSTrafficOnly) + if err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning") + return err + } + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") + instance.Status.Provisioning = false + errUpdate := r.Status().Update(ctx, instance) + if errUpdate != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + return err + } + + instance.Status.Provisioning = false + instance.Status.Provisioned = true + + if err = r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, 
"Warning", "Failed", "Unable to update instance") + } + + return nil +} + +func (r *StorageReconciler) deleteExternal(instance *azurev1.Storage) error { + ctx := context.Background() + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + _, err := storages.DeleteStorage(ctx, groupName, name) + if err != nil { + if errhelp.IsStatusCode204(err) { + r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist") + return nil + } + + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resouce in azure") + return err + } + + r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted") + return nil +} + +// SetupWithManager sets up the controller functions +func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.Storage{}). + Complete(r) +} + +/* Below code was from prior to refactor. + Left here for future reference for pulling out values post deployment. + +func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + resource := &servicev1alpha1.Storage{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting Storage Account", "Storage.Namespace", resource.Namespace, "Storage.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Storage status") + return nil, err + } + log.V(1).Info("Updated Status", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status", resourceCopy.Status) + + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.StorageAccountName = helpers.GetOutput(outputs, "storageAccountName") + resourceCopy.Output.Key1 = helpers.GetOutput(outputs, "key1") + resourceCopy.Output.Key2 = helpers.GetOutput(outputs, "key2") + resourceCopy.Output.ConnectionString1 = helpers.GetOutput(outputs, "connectionString1") + resourceCopy.Output.ConnectionString2 = helpers.GetOutput(outputs, "connectionString2") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.V(1).Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.AdditionalResources", resourceCopy.AdditionalResources, "Storage.Output", resourceCopy.Output) + } + + return resourceCopy, nil +} + +func (r *StorageReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.Storage) (err error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + secrets := []string{} + secretData := map[string]string{ + "storageAccountName": "{{.Obj.Output.StorageAccountName}}", + "key1": "{{.Obj.Output.Key1}}", + "key2": "{{.Obj.Output.Key2}}", + "connectionString1": "{{.Obj.Output.ConnectionString1}}", + "connectionString2": "{{.Obj.Output.ConnectionString2}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := s.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = 
r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Storage status") + return err + } + + return nil +} +*/ diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 2e164670cb2..cfb891ef650 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,6 +17,7 @@ package controllers import ( "context" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/storages" "os" "path/filepath" "testing" @@ -24,8 +25,7 @@ import ( helpers "github.com/Azure/azure-service-operator/pkg/helpers" azurev1 "github.com/Azure/azure-service-operator/api/v1" - resourcemanagerconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" - + resoucegroupsconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" eventhubs "github.com/Azure/azure-service-operator/pkg/resourcemanager/eventhubs" resoucegroupsresourcemanager "github.com/Azure/azure-service-operator/pkg/resourcemanager/resourcegroups" . "github.com/onsi/ginkgo" @@ -46,13 +46,17 @@ import ( var cfg *rest.Config var k8sClient client.Client + var k8sManager ctrl.Manager + var testEnv *envtest.Environment var resourceGroupName string var resourcegroupLocation string var eventhubNamespaceName string var eventhubName string var namespaceLocation string +var storageAccountName string +var blobContainerName string func TestAPIs(t *testing.T) { t.Parallel() @@ -68,10 +72,24 @@ func TestAPIs(t *testing.T) { []Reporter{envtest.NewlineReporter{}}) } -var _ = BeforeSuite(func(done Done) { +var _ = SynchronizedBeforeSuite(func() []byte { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + resoucegroupsconfig.ParseEnvironment() + resourceGroupName = "t-rg-dev-controller-" + helpers.RandomString(10) + resourcegroupLocation = resoucegroupsconfig.DefaultLocation() + + eventhubNamespaceName = "t-ns-dev-eh-ns-" + helpers.RandomString(10) + eventhubName = "t-eh-dev-sample-" + helpers.RandomString(10) + namespaceLocation = resoucegroupsconfig.DefaultLocation() + + storageAccountName = "tsadeveh" + helpers.RandomString(10) + blobContainerName = "t-bc-dev-eh-" + helpers.RandomString(10) + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { t := true @@ -84,8 +102,6 @@ var _ = BeforeSuite(func(done Done) { } } - resourcemanagerconfig.LoadSettings() - cfg, err := testEnv.Start() Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) @@ -142,8 +158,10 @@ var _ = BeforeSuite(func(done Done) { Expect(err).ToNot(HaveOccurred()) }() - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + //k8sClient = k8sManager.GetClient() + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + //Expect(k8sClient).ToNot(BeNil()) // Create the Resourcegroup resource result, _ := resoucegroupsresourcemanager.CheckExistence(context.Background(), resourceGroupName) @@ -155,20 +173,28 @@ var _ = BeforeSuite(func(done Done) { _, err = eventhubs.CreateNamespaceAndWait(context.Background(), resourceGroupName, eventhubNamespaceName, namespaceLocation) // Create the Eventhub resource - _, err = eventhubs.CreateHub(context.Background(), resourceGroupName, eventhubNamespaceName, eventhubName, int32(7), int32(1)) + _, err = eventhubs.CreateHub(context.Background(), resourceGroupName, eventhubNamespaceName, eventhubName, int32(7), int32(1), nil) - close(done) -}, 120) + // Create 
the Storage Account and Container
+	_, err = storages.CreateStorage(context.Background(), resourceGroupName, storageAccountName, resourcegroupLocation, azurev1.StorageSku{
+		Name: "Standard_LRS",
+	}, "Storage", map[string]*string{}, "", nil)
 
-var _ = AfterSuite(func(done Done) {
+	_, err = storages.CreateBlobContainer(context.Background(), resourceGroupName, storageAccountName, blobContainerName)
+
+	return []byte{}
+}, func(r []byte) {}, 120)
+
+var _ = SynchronizedAfterSuite(func() {}, func() {
 	//clean up the resources created for test
 	By("tearing down the test environment")
+	// delete the resource group and contained resources
 	_, _ = resoucegroupsresourcemanager.DeleteGroup(context.Background(), resourceGroupName)
 
 	err := testEnv.Stop()
 	Expect(err).ToNot(HaveOccurred())
-	close(done)
 }, 60)
 diff --git a/docs/development.md b/docs/development.md
new file mode 100644
index 00000000000..c06ba3be01e
--- /dev/null
+++ b/docs/development.md
@@ -0,0 +1,122 @@
+# Development
+
+## Prerequisites
+
+* a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster, e.g. [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough).
+* [kubebuilder](https://book.kubebuilder.io/quick-start.html#installation)
+* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
+## Deploy Operator and Test
+
+### Test it locally
+
+1. Create Cluster.
+
+   ```
+   kind create cluster
+   export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+   kubectl cluster-info
+   ```
+
+1. Install CRDs.
+
+   ```
+   make install
+   ```
+
+1. Run Controller.
+
+   Set up the environment variables:
+
+   ```
+   export CLOUD_NAME=AzurePublicCloud
+   export TENANT_ID=
+   export SUBSCRIPTION_ID=
+   export CLIENT_ID=
+   export CLIENT_SECRET=
+   ```
+
+   Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):
+
+   ```
+   make run
+   ```
+
+   Refer to [kubebuilder's doc](https://book.kubebuilder.io/quick-start.html#test-it-out-locally).
+
+1. Create a Custom Resource.
+
+   Create your CR (make sure to edit it first to specify the fields). Example:
+
+   ```
+   kubectl apply -f examples/service/v1alpha1/storage.yaml
+   ```
+
+### Test it on a remote cluster
+
+1. Create Cluster.
+
+   ```
+   az aks create -g <resource-group> -n <cluster-name>
+   az aks get-credentials -g <resource-group> -n <cluster-name>
+   kubectl cluster-info
+   ```
+
+1. Install CRDs.
+
+   ```
+   make install
+   ```
+
+1. Build and Push the image.
+
+   ```
+   IMG=<image-name> make build-and-push
+   ```
+
+   Manually update the kustomize image patch file `config/default/manager_image_patch.yaml` for the manager resource.
+
+1. Run Controller.
+
+   Update `config/manager/manager.yaml` with your service principal.
+
+   ```
+   make deploy
+   ```
+
+1. Create a Custom Resource.
+
+   Create your CR (make sure to edit it first to specify the fields). Example:
+
+   ```
+   kubectl apply -f examples/service/v1alpha1/storage.yaml
+   ```
+
+## Add a New Custom Resource
+
+### 1. Add a New API
+
+```
+kubebuilder create api --group service --version v1alpha1 --kind <Kind>
+```
+
+Refer to [kubebuilder's doc](https://book.kubebuilder.io/cronjob-tutorial/new-api.html)
+
+### 2. Design an API
+
+1. Try to create the specific Azure service, and download the template in the `Review+Create` step.
+2. Upload the template to a storage account. For now, we can use the storage account `azureserviceoperator`.
+3. 
Based on the template, we can figure out what the `Spec` should be like. +4. The `Status` should contain the resource group name, which can be used to delete the resource. + +Refer to [kubebuilder's doc](https://book.kubebuilder.io/cronjob-tutorial/api-design.html) + +Note: + +* Don't forget to add `// +kubebuilder:subresource:status` if we want a status subresource. + +* Run `make manifests` if you find the property you add doesn't work. + +### 3. Delete external resource + +[Using Finalizers](https://book.kubebuilder.io/reference/using-finalizers.html) diff --git a/examples/demo/azure-vote-app-redis.yaml b/examples/demo/azure-vote-app-redis.yaml new file mode 100644 index 00000000000..a15c582af8e --- /dev/null +++ b/examples/demo/azure-vote-app-redis.yaml @@ -0,0 +1,65 @@ +apiVersion: azure.microsoft.com/v1alpha1 +kind: RedisCache +metadata: + name: azure-redis +spec: + location: eastus2 + properties: + sku: + name: Basic + family: C + capacity: 1 + enableNonSslPort: true +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-front +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "beta.kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: microsoft/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS_NAME + valueFrom: + secretKeyRef: + name: azure-redis + key: redisCacheName + - name: REDIS + value: $(REDIS_NAME).redis.cache.windows.net + - name: REDIS_PWD + valueFrom: + secretKeyRef: + name: azure-redis + key: primaryKey +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-front +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index b92001fb4ed..d82aa77e076 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -11,4 +11,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/ \ No newline at end of file +*/ diff --git a/main.go b/main.go index 1da4005f2b6..ba9eb3f6088 100644 --- a/main.go +++ b/main.go @@ -17,13 +17,15 @@ package main import ( "flag" + + "k8s.io/apimachinery/pkg/runtime" + "os" azurev1 "github.com/Azure/azure-service-operator/api/v1" "github.com/Azure/azure-service-operator/controllers" resourcemanagerconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" - "k8s.io/apimachinery/pkg/runtime" kscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" @@ -32,6 +34,10 @@ import ( ) var ( + masterURL, kubeconfig, resources, clusterName string + cloudName, tenantID, subscriptionID, clientID, clientSecret string + useAADPodIdentity bool + scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") ) @@ -71,6 +77,33 @@ func main() { os.Exit(1) } + err = (&controllers.StorageReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Storage"), + Recorder: mgr.GetEventRecorderFor("Storage-controller"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Storage") + os.Exit(1) + } + err = (&controllers.CosmosDBReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("CosmosDB"), + Recorder: mgr.GetEventRecorderFor("CosmosDB-controller"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "CosmosDB") + os.Exit(1) + } + if err = (&controllers.RedisCacheReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("RedisCache"), + Recorder: mgr.GetEventRecorderFor("RedisCache-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RedisCache") + os.Exit(1) + } + err = resourcemanagerconfig.LoadSettings() if err != nil { setupLog.Error(err, "unable to parse settings required to provision resources in Azure") @@ -114,8 +147,13 @@ func main() { os.Exit(1) } - if err = (&azurev1.EventhubNamespace{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "EventhubNamespace") + err = (&controllers.ConsumerGroupReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("ConsumerGroup"), + Recorder: mgr.GetEventRecorderFor("ConsumerGroup-controller"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ConsumerGroup") os.Exit(1) } err = (&controllers.ConsumerGroupReconciler{ diff --git a/pkg/client/deployment/deployment.go b/pkg/client/deployment/deployment.go new file mode 100644 index 00000000000..1ab6e99e044 --- /dev/null +++ b/pkg/client/deployment/deployment.go @@ -0,0 +1,41 @@ +package deployment + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + + "github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/iam" +) + +func getDeploymentsClient() resources.DeploymentsClient { + deployClient := resources.NewDeploymentsClient(config.Instance.SubscriptionID) + a, _ := iam.GetResourceManagementAuthorizer() + deployClient.Authorizer = a + return deployClient +} + +// CreateDeployment creates a template deployment using the +// referenced JSON files for the template and its parameters +func CreateDeployment(ctx context.Context, resourceGroupName, 
deploymentName string, template, params *map[string]interface{}) error {
+	deployClient := getDeploymentsClient()
+	_, err := deployClient.CreateOrUpdate(
+		ctx,
+		resourceGroupName,
+		deploymentName,
+		resources.Deployment{
+			Properties: &resources.DeploymentProperties{
+				Template:   template,
+				Parameters: params,
+				Mode:       resources.Incremental,
+			},
+		},
+	)
+	return err
+}
+
+// GetDeployment returns the named deployment in the given resource group
+func GetDeployment(ctx context.Context, resourceGroupName, deploymentName string) (de resources.DeploymentExtended, err error) {
+	deployClient := getDeploymentsClient()
+	return deployClient.Get(ctx, resourceGroupName, deploymentName)
+}
 diff --git a/pkg/client/group/group.go b/pkg/client/group/group.go
new file mode 100644
index 00000000000..c9dd8742f42
--- /dev/null
+++ b/pkg/client/group/group.go
@@ -0,0 +1,39 @@
+package group
+
+import (
+	"context"
+	"log"
+
+	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources"
+	"github.com/Azure/azure-service-operator/pkg/config"
+	"github.com/Azure/azure-service-operator/pkg/iam"
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+func getGroupsClient() resources.GroupsClient {
+	groupsClient := resources.NewGroupsClient(config.Instance.SubscriptionID)
+	a, err := iam.GetResourceManagementAuthorizer()
+	if err != nil {
+		log.Fatalf("failed to initialize authorizer: %v\n", err)
+	}
+	groupsClient.Authorizer = a
+	return groupsClient
+}
+
+// CreateGroup creates (or updates) a resource group with the given name and location
+func CreateGroup(ctx context.Context, groupName, location string, tags map[string]*string) (resources.Group, error) {
+	groupsClient := getGroupsClient()
+	return groupsClient.CreateOrUpdate(
+		ctx,
+		groupName,
+		resources.Group{
+			Location: to.StringPtr(location),
+			Tags:     tags,
+		})
+}
+
+// DeleteGroup begins deleting the named resource group and returns the future for the operation
+func DeleteGroup(ctx context.Context, groupName string) (result resources.GroupsDeleteFuture, err error) {
+	groupsClient := getGroupsClient()
+	return groupsClient.Delete(ctx, groupName)
+}
 diff --git a/pkg/config/config.go b/pkg/config/config.go
new file mode 100644
index 00000000000..cd1ff39795c
--- /dev/null
+++ b/pkg/config/config.go
@@ -0,0 +1,56 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/Azure/go-autorest/autorest/azure"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// Instance holds the operator's global configuration
+var Instance *Config
+
+type Config struct {
+	KubeClientset     kubernetes.Interface
+	Resources         map[string]bool `json:"resources"`
+	ClusterName       string          `json:"clusterName"`
+	CloudName         string          `json:"cloudName"`
+	TenantID          string          `json:"tenantID"`
+	SubscriptionID    string          `json:"subscriptionID"`
+	ClientID          string          `json:"clientID"`
+	ClientSecret      string          `json:"clientSecret"`
+	UseAADPodIdentity bool            `json:"useAADPodIdentity"`
+}
+
+func getKubeconfig(masterURL, kubeconfig string) (*rest.Config, error) {
+	if kubeconfig != "" {
+		return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
+	}
+	return rest.InClusterConfig()
+}
+
+// CreateKubeClientset builds a Kubernetes clientset from the given kubeconfig, falling back to in-cluster config
+func CreateKubeClientset(masterURL, kubeconfig string) (kubernetes.Interface, error) {
+	config, err := getKubeconfig(masterURL, kubeconfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get k8s config. %+v", err)
+	}
+
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get k8s client. %+v", err)
+	}
+
+	return clientset, nil
+}
+
+// Environment returns the azure.Environment for the configured cloud
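+//
+// A minimal usage sketch (hypothetical, not part of this change; it assumes
+// Instance has already been populated, e.g. with CloudName "AzurePublicCloud"):
+//
+//	env := config.Environment()
+//	armEndpoint := env.ResourceManagerEndpoint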
+func Environment() azure.Environment {
+	cloudName := Instance.CloudName
+	env, err := azure.EnvironmentFromName(cloudName)
+	if err != nil {
+		panic(fmt.Sprintf(
+			"invalid cloud name '%s' specified, cannot continue\n", cloudName))
+	}
+	return env
+}
 diff --git a/pkg/helpers/deployment.go b/pkg/helpers/deployment.go
new file mode 100644
index 00000000000..46bde127113
--- /dev/null
+++ b/pkg/helpers/deployment.go
@@ -0,0 +1,36 @@
+package helpers
+
+import (
+	"bytes"
+	"text/template"
+)
+
+// IsDeploymentComplete determines whether the deployment has reached a terminal state
+func IsDeploymentComplete(status string) bool {
+	switch status {
+	case "Succeeded", "Failed", "Canceled":
+		return true
+	}
+	return false
+}
+
+// Templatize renders the given template string against data
+func Templatize(tempStr string, data interface{}) (resp string, err error) {
+	t := template.New("templating")
+	t, err = t.Parse(tempStr)
+	if err != nil {
+		return
+	}
+
+	var tpl bytes.Buffer
+	err = t.Execute(&tpl, data)
+	return tpl.String(), err
+}
+
+// GetOutput extracts the string value for key from an ARM deployment outputs
+// object; it panics if outputs does not have the expected shape
+func GetOutput(outputs interface{}, key string) string {
+	return outputs.(map[string]interface{})[key].(map[string]interface{})["value"].(string)
+}
 diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go
new file mode 100644
index 00000000000..e033f019db0
--- /dev/null
+++ b/pkg/helpers/helpers.go
@@ -0,0 +1,43 @@
+package helpers
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var (
+	log = ctrl.Log.WithName("helpers")
+)
+
+// KubernetesResourceName returns the resource name for other components
+func KubernetesResourceName(name string) string {
+	reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
+	return reg.ReplaceAllString(name, "-")
+}
+
+// AzrueResourceGroupName (sic) returns a deterministic, hash-based resource
+// group name for the given inputs
+func AzrueResourceGroupName(subscriptionID, clusterName, resourceType, name, namespace string) string {
+	nameParts := []string{subscriptionID, clusterName, resourceType, name, namespace}
+	nameString := strings.Join(nameParts, "-")
+	log.V(1).Info("Getting Azure Resource Group Name", "nameString", nameString)
+	hash := md5.New()
+	io.WriteString(hash, nameString)
+	return fmt.Sprintf("aso-%x", hash.Sum(nil))
+}
+
+// IgnoreKubernetesResourceNotFound returns nil if err is a Kubernetes not-found error
+func IgnoreKubernetesResourceNotFound(err error) error {
+	return client.IgnoreNotFound(err)
+}
+
+// IgnoreAzureResourceNotFound returns nil if err is an Azure 404
+func IgnoreAzureResourceNotFound(err error) error {
+	if detailed, ok := err.(autorest.DetailedError); ok {
+		if code, ok := detailed.StatusCode.(int); ok && code == 404 {
+			return nil
+		}
+	}
+	return err
+}
 diff --git a/pkg/helpers/secret.go b/pkg/helpers/secret.go
new file mode 100644
index 00000000000..dc6f7a891fd
--- /dev/null
+++ b/pkg/helpers/secret.go
@@ -0,0 +1,49 @@
+package helpers
+
+import (
+	"github.com/Azure/azure-service-operator/pkg/config"
+	apiv1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CreateSecret creates (or updates) a Secret whose values are rendered from secretTemplate
+func CreateSecret(resource interface{}, svcName, svcNamespace string, secretTemplate map[string]string) string {
+	data := map[string]string{}
+	for key, value := range secretTemplate {
+		tempValue, err := Templatize(value, Data{Obj: resource})
+		if err != nil {
+			log.Error(err, "error parsing secret value template")
+			return ""
+		}
+		data[key] = tempValue
+	}
+
+	secretName := KubernetesResourceName(svcName)
+	secretObj := &apiv1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretName,
+			Namespace: svcNamespace,
+		},
+		StringData: data,
+	}
+
+	_, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Get(secretName, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		_, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Create(secretObj)
+		if err != nil {
+			log.Error(err, "error creating Secret")
+		}
+	} else {
+		_, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Update(secretObj)
+		if err != nil {
+			log.Error(err, "error updating Secret")
+		}
+	}
+
+	return secretName
+}
+
+// DeleteSecret deletes the Secret that was created for the given service name
+func DeleteSecret(svcName, svcNamespace string) error {
+	secretName := KubernetesResourceName(svcName)
+	return config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Delete(secretName, &metav1.DeleteOptions{})
+}
 diff --git a/pkg/helpers/service.go b/pkg/helpers/service.go
new file mode 100644
index 00000000000..2cebc8518f8
--- /dev/null
+++ b/pkg/helpers/service.go
@@ -0,0 +1,57 @@
+package helpers
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/Azure/azure-service-operator/pkg/config"
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CreateExternalNameService creates a Kubernetes Service of type ExternalName
+func CreateExternalNameService(resource interface{}, svcName string, svcNamespace string, externalNameTemplate string, svcPortTemplate string) string {
+	externalName, err := Templatize(externalNameTemplate, Data{Obj: resource})
+	if err != nil {
+		log.Error(err, "error parsing external name template")
+		return ""
+	}
+
+	svcPortString, err := Templatize(svcPortTemplate, Data{Obj: resource})
+	if err != nil {
+		log.Error(err, "error parsing service port template")
+		return ""
+	}
+
+	svcPortStripSlash := strings.Replace(svcPortString, "\\", "", -1)
+
+	// bitSize 32 so the full 0-65535 port range parses; ParseInt only returns
+	// an int64, so convert to int32 for the apiv1.ServicePort field below
+	svcPortInt64, err := strconv.ParseInt(svcPortStripSlash, 0, 32)
+	if err != nil {
+		log.Error(err, "error converting service port template string to int")
+		return ""
+	}
+
+	svcPort := int32(svcPortInt64)
+
+	service := &apiv1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: KubernetesResourceName(svcName),
+		},
+		Spec: apiv1.ServiceSpec{
+			Type:         apiv1.ServiceTypeExternalName,
+			ExternalName: externalName,
+			Ports: []apiv1.ServicePort{
+				apiv1.ServicePort{
+					Port: svcPort,
+				},
+			},
+		},
+	}
+
+	newService, err := config.Instance.KubeClientset.CoreV1().Services(svcNamespace).Create(service)
+	if err != nil {
+		log.Error(err, "error creating service")
+		return ""
+	}
+	return newService.Name
+}
 diff --git a/pkg/helpers/types.go b/pkg/helpers/types.go
new file mode 100644
index 00000000000..ee69776d1c6
--- /dev/null
+++ b/pkg/helpers/types.go
@@ -0,0 +1,6 @@
+package helpers
+
+// Data wraps the object that is passed to the templating helpers
+type Data struct {
+	Obj interface{}
+}
 diff --git a/pkg/iam/authorizers.go b/pkg/iam/authorizers.go
new file mode 100644
index 00000000000..3561073ec58
--- /dev/null
+++ b/pkg/iam/authorizers.go
@@ -0,0 +1,58 @@
+package iam
+
+import (
+	"github.com/Azure/azure-service-operator/pkg/config"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/azure/auth"
+)
+
+var (
+	armAuthorizer autorest.Authorizer
+)
+
+// GetResourceManagementAuthorizer gets an OAuthTokenAuthorizer for Azure Resource Manager
+func GetResourceManagementAuthorizer() (autorest.Authorizer, error) {
+	if armAuthorizer != nil {
+		return armAuthorizer, nil
+	}
+
+	var a autorest.Authorizer
+	var err error
+
+	if config.Instance.UseAADPodIdentity {
+		a, err = auth.NewAuthorizerFromEnvironment()
+	} else {
+		a, err = getAuthorizerForResource(config.Environment().ResourceManagerEndpoint)
+	}
+	if err == nil {
+		// cache the authorizer
+		armAuthorizer = a
+	} else {
+		// clear the cache on failure
+		armAuthorizer = nil
+	}
+
+	return armAuthorizer, err
+}
+
+func getAuthorizerForResource(resource string) (autorest.Authorizer, error) {
+	var a autorest.Authorizer
+	var err error
+
+	oauthConfig, err := adal.NewOAuthConfig(
+		config.Environment().ActiveDirectoryEndpoint, config.Instance.TenantID)
+	if err != nil {
+		return nil, err
+	}
+
+	token, err := adal.NewServicePrincipalToken(
+		*oauthConfig, config.Instance.ClientID, config.Instance.ClientSecret, resource)
+	if err != nil {
+		return nil, err
+	}
+	a = autorest.NewBearerAuthorizer(token)
+
+	return a, err
+}
 diff --git a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
new file mode 100644
index 00000000000..c36eb096fea
--- /dev/null
+++ b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
@@ -0,0 +1,104 @@
+package cosmosdbs
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb"
+	azurev1 "github.com/Azure/azure-service-operator/api/v1"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/iam"
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+func getCosmosDBClient() documentdb.DatabaseAccountsClient {
+	cosmosDBClient := documentdb.NewDatabaseAccountsClient(config.SubscriptionID())
+	a, err := iam.GetResourceManagementAuthorizer()
+	if err != nil {
+		log.Fatalf("failed to initialize authorizer: %v\n", err)
+	}
+	cosmosDBClient.Authorizer = a
+	cosmosDBClient.AddToUserAgent(config.UserAgent())
+	return cosmosDBClient
+}
+
+// CreateCosmosDB creates a new CosmosDB database account
+func CreateCosmosDB(ctx context.Context, groupName string,
+	cosmosDBName string,
+	location string,
+	kind azurev1.CosmosDBKind,
+	dbType azurev1.CosmosDBDatabaseAccountOfferType,
+	tags map[string]*string) (documentdb.DatabaseAccount, error) {
+	cosmosDBClient := getCosmosDBClient()
+
+	log.Println("CosmosDB:CosmosDBName " + cosmosDBName)
+
+	/* Uncomment and update if we should be checking for name exists first
+	result, err := cosmosDBClient.CheckNameExists(ctx, cosmosDBName)
+	if err != nil {
+		return documentdb.DatabaseAccount{}, err
+	}
+	if *result.NameAvailable == false {
+		return documentdb.DatabaseAccount{}, errors.New("cosmosdb name not available")
+	}*/
+
+	dbKind := documentdb.DatabaseAccountKind(kind)
+	sDBType := string(dbType)
+
+	/*
+	 * Current state of Locations and CosmosDB properties:
+	 * Creating a Database account with CosmosDB requires
+	 * that DatabaseAccountCreateUpdateProperties be sent over
+	 * and currently we are not reading most of these values in
+	 * as part of the Spec for CosmosDB. We are currently
+	 * specifying a single Location as part of a location array
+	 * which matches the location set for the overall CosmosDB
+	 * instance. This matches the general behavior of creating
+	 * a CosmosDB instance in the portal where the only
+	 * geo-replicated region is the sole region the CosmosDB
+	 * is created in.
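+	 *
+	 * A hedged sketch only (multi-region support is not implemented in this
+	 * patch): adding geo-replicated regions would mean appending further
+	 * entries to the same array with increasing failover priority, e.g.
+	 *
+	 *   locations := []documentdb.Location{
+	 *     {LocationName: to.StringPtr("eastus"), FailoverPriority: to.Int32Ptr(0)},
+	 *     {LocationName: to.StringPtr("westus"), FailoverPriority: to.Int32Ptr(1)},
+	 *   }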
+	 */
+	locationObj := documentdb.Location{
+		ID:               to.StringPtr(fmt.Sprintf("%s-%s", cosmosDBName, location)),
+		FailoverPriority: to.Int32Ptr(0),
+		LocationName:     to.StringPtr(location),
+	}
+
+	locationsArray := []documentdb.Location{
+		locationObj,
+	}
+
+	createUpdateParams := documentdb.DatabaseAccountCreateUpdateParameters{
+		Location: to.StringPtr(location),
+		Tags:     tags,
+		Name:     &cosmosDBName,
+		Kind:     dbKind,
+		Type:     to.StringPtr("Microsoft.DocumentDb/databaseAccounts"),
+		ID:       &cosmosDBName,
+		DatabaseAccountCreateUpdateProperties: &documentdb.DatabaseAccountCreateUpdateProperties{
+			DatabaseAccountOfferType:      &sDBType,
+			EnableMultipleWriteLocations:  to.BoolPtr(false),
+			IsVirtualNetworkFilterEnabled: to.BoolPtr(false),
+			Locations:                     &locationsArray,
+		},
+	}
+
+	log.Printf("creating cosmosDB '%s' in resource group '%s' and location: %v\n", cosmosDBName, groupName, location)
+
+	future, err := cosmosDBClient.CreateOrUpdate(
+		ctx, groupName, cosmosDBName, createUpdateParams)
+	if err != nil {
+		log.Printf("ERROR creating cosmosDB '%s' in resource group '%s' and location: %v\n", cosmosDBName, groupName, location)
+		log.Printf("failed to create cosmosdb: %v\n", err)
+		return documentdb.DatabaseAccount{}, err
+	}
+	return future.Result(cosmosDBClient)
+}
+
+// DeleteCosmosDB deletes the named CosmosDB database account
+func DeleteCosmosDB(ctx context.Context, groupName string, cosmosDBName string) (result documentdb.DatabaseAccountsDeleteFuture, err error) {
+	cosmosDBClient := getCosmosDBClient()
+	return cosmosDBClient.Delete(ctx, groupName, cosmosDBName)
+}
 diff --git a/pkg/resourcemanager/eventhubs/consumergroup_test.go b/pkg/resourcemanager/eventhubs/consumergroup_test.go
index b070ca1208b..0824df9b15a 100644
--- a/pkg/resourcemanager/eventhubs/consumergroup_test.go
+++ b/pkg/resourcemanager/eventhubs/consumergroup_test.go
@@ -46,7 +46,7 @@ var _ = Describe("ConsumerGroup", func() {
 
 		_, _ = CreateNamespaceAndWait(context.Background(), rgName, eventhubNamespaceName, namespaceLocation)
 
-		_, _ = CreateHub(context.Background(), rgName, eventhubNamespaceName, eventhubName, messageRetentionInDays, partitionCount)
+		_, _ = CreateHub(context.Background(), rgName, eventhubNamespaceName, eventhubName, messageRetentionInDays, partitionCount, nil)
 
 	})
 diff --git a/pkg/resourcemanager/eventhubs/hub.go b/pkg/resourcemanager/eventhubs/hub.go
index b1a500e22bd..721ec15c910 100644
--- a/pkg/resourcemanager/eventhubs/hub.go
+++ b/pkg/resourcemanager/eventhubs/hub.go
@@ -3,7 +3,6 @@ package eventhubs
 import (
 	"context"
 	"fmt"
-
 	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
 	"github.com/Azure/azure-service-operator/pkg/resourcemanager/iam"
@@ -39,7 +38,7 @@ func DeleteHub(ctx context.Context, resourceGroupName string, namespaceName stri
 // resourceGroupName - name of the resource group within the azure subscription.
// namespaceName - the Namespace name // eventHubName - the Event Hub name -func CreateHub(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, MessageRetentionInDays int32, PartitionCount int32) (eventhub.Model, error) { +func CreateHub(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, MessageRetentionInDays int32, PartitionCount int32, captureDescription *eventhub.CaptureDescription) (eventhub.Model, error) { hubClient := getHubsClient() // MessageRetentionInDays - Number of days to retain the events for this Event Hub, value should be 1 to 7 days @@ -47,21 +46,24 @@ func CreateHub(ctx context.Context, resourceGroupName string, namespaceName stri return eventhub.Model{}, fmt.Errorf("MessageRetentionInDays is invalid") } - // PartitionCount - Number of partitions created for the Event Hub, allowed values are from 1 to 32 partitions. - if PartitionCount < 1 || PartitionCount > 32 { + // PartitionCount - Number of partitions created for the Event Hub, allowed values are from 2 to 32 partitions. + if PartitionCount < 2 || PartitionCount > 32 { return eventhub.Model{}, fmt.Errorf("PartitionCount is invalid") } + properties := eventhub.Properties{ + PartitionCount: to.Int64Ptr(int64(PartitionCount)), + MessageRetentionInDays: to.Int64Ptr(int64(MessageRetentionInDays)), + CaptureDescription: captureDescription, + } + return hubClient.CreateOrUpdate( ctx, resourceGroupName, namespaceName, eventHubName, eventhub.Model{ - Properties: &eventhub.Properties{ - PartitionCount: to.Int64Ptr(int64(PartitionCount)), - MessageRetentionInDays: to.Int64Ptr(int64(MessageRetentionInDays)), - }, + Properties: &properties, }, ) } diff --git a/pkg/resourcemanager/eventhubs/hub_test.go b/pkg/resourcemanager/eventhubs/hub_test.go index 8a2d883b572..00833b08f91 100644 --- a/pkg/resourcemanager/eventhubs/hub_test.go +++ b/pkg/resourcemanager/eventhubs/hub_test.go @@ -57,11 +57,11 @@ var _ = Describe("Eventhub", func() { eventhubName := "t-eh-" + helpers.RandomString(10) messageRetentionInDays := int32(7) - partitionCount := int32(1) + partitionCount := int32(2) var err error - _, err = CreateHub(context.Background(), rgName, eventhubNamespaceName, eventhubName, messageRetentionInDays, partitionCount) + _, err = CreateHub(context.Background(), rgName, eventhubNamespaceName, eventhubName, messageRetentionInDays, partitionCount, nil) Expect(err).NotTo(HaveOccurred()) Eventually(func() bool { diff --git a/pkg/resourcemanager/eventhubs/suite_test.go b/pkg/resourcemanager/eventhubs/suite_test.go index 3ddb4f3f8e8..4568ae87b43 100644 --- a/pkg/resourcemanager/eventhubs/suite_test.go +++ b/pkg/resourcemanager/eventhubs/suite_test.go @@ -57,7 +57,9 @@ var _ = BeforeSuite(func(done Done) { By("bootstrapping test environment") - resourcemanagerconfig.LoadSettings() + resourcemanagerconfig.ParseEnvironment() + resourceGroupName = "t-rg-dev-rm-eh-" + helpers.RandomString(10) + resourcegroupLocation = resourcemanagerconfig.DefaultLocation() //create resourcegroup for this suite result, _ := resoucegroupsresourcemanager.CheckExistence(context.Background(), resourceGroupName) diff --git a/pkg/resourcemanager/rediscaches/rediscaches.go b/pkg/resourcemanager/rediscaches/rediscaches.go new file mode 100644 index 00000000000..8ebe0e2df29 --- /dev/null +++ b/pkg/resourcemanager/rediscaches/rediscaches.go @@ -0,0 +1,85 @@ +package rediscaches + +import ( + "context" + "errors" + "fmt" + "log" + + 
"github.com/Azure/azure-sdk-for-go/services/redis/mgmt/2018-03-01/redis" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" + "github.com/Azure/go-autorest/autorest/to" +) + +func getRedisCacheClient() redis.Client { + redisClient := redis.NewClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + redisClient.Authorizer = a + redisClient.AddToUserAgent(config.UserAgent()) + return redisClient +} + +// CreateRedisCache creates a new RedisCache +func CreateRedisCache(ctx context.Context, + groupName string, + redisCacheName string, + location string, + sku azurev1.RedisCacheSku, + enableNonSSLPort bool, + tags map[string]*string) (redis.ResourceType, error) { + redisClient := getRedisCacheClient() + + log.Println("RedisCache:CacheName" + redisCacheName) + + //Check if name is available + redisType := "Microsoft.Cache/redis" + checkNameParams := redis.CheckNameAvailabilityParameters{ + Name: &redisCacheName, + Type: &redisType, + } + result, err := redisClient.CheckNameAvailability(ctx, checkNameParams) + if err != nil { + return redis.ResourceType{}, err + } + + if result.StatusCode != 200 { + log.Fatalf("redis cache name (%s) not available: %v\n", redisCacheName, result.Status) + return redis.ResourceType{}, errors.New("redis cache name not available") + } + + log.Println(fmt.Sprintf("creating rediscache '%s' in resource group '%s' and location: %v", redisCacheName, groupName, location)) + + redisSku := redis.Sku{ + Name: redis.SkuName(sku.Name), + Family: redis.SkuFamily(sku.Family), + Capacity: to.Int32Ptr(sku.Capacity), + } + + createParams := redis.CreateParameters{ + Location: to.StringPtr(location), + Tags: tags, + CreateProperties: &redis.CreateProperties{ + EnableNonSslPort: &enableNonSSLPort, + Sku: &redisSku, + }, + } + + future, err := redisClient.Create( + ctx, groupName, redisCacheName, createParams) + if err != nil { + log.Println(fmt.Sprintf("ERROR creating redisCache '%s' in resource group '%s' and location: %v", redisCacheName, groupName, location)) + log.Println(fmt.Printf("failed to initialize redis Cache: %v\n", err)) + } + return future.Result(redisClient) +} + +// DeleteRedisCache removes the resource group named by env var +func DeleteRedisCache(ctx context.Context, groupName string, redisCacheName string) (result redis.DeleteFuture, err error) { + redisClient := getRedisCacheClient() + return redisClient.Delete(ctx, groupName, redisCacheName) +} diff --git a/pkg/resourcemanager/resourcegroups/resourcegroup_test.go b/pkg/resourcemanager/resourcegroups/resourcegroup_test.go index 099ecde64c2..963b9a30e7d 100644 --- a/pkg/resourcemanager/resourcegroups/resourcegroup_test.go +++ b/pkg/resourcemanager/resourcegroups/resourcegroup_test.go @@ -18,12 +18,13 @@ package resourcegroups import ( "context" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - helpers "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/helpers" ) var _ = Describe("ResourceGroups", func() { @@ -48,7 +49,7 @@ var _ = Describe("ResourceGroups", func() { const timeout = time.Second * 240 resourcegroupName := "t-rg-" + helpers.RandomString(10) - resourcegroupLocation := "westus" + resourcegroupLocation := config.DefaultLocation() var err error _, err = CreateGroup(context.Background(), resourcegroupName, resourcegroupLocation) diff --git a/pkg/resourcemanager/resourcegroups/suite_test.go b/pkg/resourcemanager/resourcegroups/suite_test.go index 5a7a91bbfe2..2baddb63b4b 100644 --- a/pkg/resourcemanager/resourcegroups/suite_test.go +++ b/pkg/resourcemanager/resourcegroups/suite_test.go @@ -46,7 +46,7 @@ var _ = BeforeSuite(func(done Done) { By("bootstrapping test environment") - resourcemanagerconfig.LoadSettings() + resourcemanagerconfig.ParseEnvironment() close(done) }, 60) diff --git a/pkg/resourcemanager/sqlclient/endtoend_test.go b/pkg/resourcemanager/sqlclient/endtoend_test.go index f85d7854b42..119de4a2812 100644 --- a/pkg/resourcemanager/sqlclient/endtoend_test.go +++ b/pkg/resourcemanager/sqlclient/endtoend_test.go @@ -45,6 +45,7 @@ func TestCreateOrUpdateSQLServer(t *testing.T) { sqlServerProperties := SQLServerProperties{ AdministratorLogin: to.StringPtr("Moss"), AdministratorLoginPassword: to.StringPtr("TheITCrowd_{01}!"), + AllowAzureServicesAccess: true, } // wait for server to be created, then only proceed once activated diff --git a/pkg/resourcemanager/sqlclient/sqlproperties.go b/pkg/resourcemanager/sqlclient/sqlproperties.go index 7d3131d7685..23f1116d3cc 100644 --- a/pkg/resourcemanager/sqlclient/sqlproperties.go +++ b/pkg/resourcemanager/sqlclient/sqlproperties.go @@ -53,6 +53,9 @@ type SQLServerProperties struct { // AdministratorLoginPassword - The administrator login password (required for server creation). AdministratorLoginPassword *string + + // AllowAzureServicesAccess - allow Azure services and resources to access this server + AllowAzureServicesAccess bool } // SQLDatabaseProperties contains values needed for adding / updating SQL servers, diff --git a/pkg/resourcemanager/storages/blob_containers.go b/pkg/resourcemanager/storages/blob_containers.go new file mode 100644 index 00000000000..cedb423214a --- /dev/null +++ b/pkg/resourcemanager/storages/blob_containers.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 microsoft. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package storages
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	s "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/iam"
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func getContainerClient() s.BlobContainersClient {
+	containersClient := s.NewBlobContainersClient(config.SubscriptionID())
+	auth, err := iam.GetResourceManagementAuthorizer()
+	if err != nil {
+		log.Fatalf("failed to initialize authorizer: %v\n", err)
+	}
+	containersClient.Authorizer = auth
+	containersClient.AddToUserAgent(config.UserAgent())
+	return containersClient
+}
+
+// CreateBlobContainer creates a blob container in a storage account.
+// Parameters:
+// resourceGroupName - name of the resource group within the azure subscription.
+// accountName - the name of the storage account
+// containerName - the name of the container
+func CreateBlobContainer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*s.BlobContainer, error) {
+	containerClient := getContainerClient()
+
+	log.Printf("Creating blob container '%s' in storage account: %s\n", containerName, accountName)
+
+	container, err := containerClient.Create(
+		ctx,
+		resourceGroupName,
+		accountName,
+		containerName,
+		s.BlobContainer{})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &container, err
+}
+
+// GetBlobContainer gets the description of the specified blob container.
+// Parameters:
+// resourceGroupName - name of the resource group within the azure subscription.
+// accountName - the name of the storage account
+// containerName - the name of the container
+func GetBlobContainer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result s.BlobContainer, err error) {
+	containerClient := getContainerClient()
+	return containerClient.Get(ctx, resourceGroupName, accountName, containerName)
+}
+
+// DeleteBlobContainer deletes a blob container in a storage account.
+// Parameters:
+// resourceGroupName - name of the resource group within the azure subscription.
+// accountName - the name of the storage account
+// containerName - the name of the container
+func DeleteBlobContainer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) {
+	containerClient := getContainerClient()
+	log.Printf("Deleting blob container '%s' in storage account: %s\n", containerName, accountName)
+
+	return containerClient.Delete(ctx,
+		resourceGroupName,
+		accountName,
+		containerName)
+}
 diff --git a/pkg/resourcemanager/storages/blob_containers_test.go b/pkg/resourcemanager/storages/blob_containers_test.go
new file mode 100644
index 00000000000..c9555b79f73
--- /dev/null
+++ b/pkg/resourcemanager/storages/blob_containers_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2019 microsoft.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storages
+
+import (
+	"context"
+	"time"
+
+	apiv1 "github.com/Azure/azure-service-operator/api/v1"
+	"github.com/Azure/azure-service-operator/pkg/helpers"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Blob Container", func() {
+
+	const timeout = time.Second * 180
+
+	var storageAccountName = "tsadevsc" + helpers.RandomString(10)
+
+	BeforeEach(func() {
+		storageLocation := config.DefaultLocation()
+		// Add any setup steps that need to be executed before each test
+		_, _ = CreateStorage(context.Background(), resourceGroupName, storageAccountName, storageLocation, apiv1.StorageSku{
+			Name: "Standard_LRS",
+		}, "Storage", map[string]*string{}, "", nil)
+	})
+
+	AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+		_, _ = DeleteStorage(context.Background(), resourceGroupName, storageAccountName)
+	})
+
+	// Add Tests for OpenAPI validation (or additional CRD features) specified in
+	// your API definition.
+	// Avoid adding tests for vanilla CRUD operations because they would
+	// test Kubernetes API server, which isn't the goal here.
+
+	Context("Create and Delete", func() {
+		It("should create and delete blob container in azure", func() {
+
+			var err error
+
+			containerName := "t-dev-bc-" + helpers.RandomString(10)
+
+			_, err = CreateBlobContainer(context.Background(), resourceGroupName, storageAccountName, containerName)
+			Expect(err).NotTo(HaveOccurred())
+
+			Eventually(func() bool {
+				result, _ := GetBlobContainer(context.Background(), resourceGroupName, storageAccountName, containerName)
+				return result.Response.StatusCode == 200
+			}, timeout,
+			).Should(BeTrue())
+
+			_, err = DeleteBlobContainer(context.Background(), resourceGroupName, storageAccountName, containerName)
+			Expect(err).NotTo(HaveOccurred())
+
+			Eventually(func() bool {
+				result, _ := GetBlobContainer(context.Background(), resourceGroupName, storageAccountName, containerName)
+				return result.Response.StatusCode == 404
+			}, timeout,
+			).Should(BeTrue())
+
+		})
+
+	})
+})
 diff --git a/pkg/resourcemanager/storages/storages.go b/pkg/resourcemanager/storages/storages.go
new file mode 100644
index 00000000000..ebcf23cd426
--- /dev/null
+++ b/pkg/resourcemanager/storages/storages.go
@@ -0,0 +1,91 @@
+package storages
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
+	apiv1 "github.com/Azure/azure-service-operator/api/v1"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/iam"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+func getStoragesClient() storage.AccountsClient {
+	storagesClient := storage.NewAccountsClient(config.SubscriptionID())
+	a, err := iam.GetResourceManagementAuthorizer()
+	if err != nil {
+		log.Fatalf("failed to initialize authorizer: %v\n", err)
+	}
+	storagesClient.Authorizer = a
+	storagesClient.AddToUserAgent(config.UserAgent())
+	return storagesClient
+}
+
+// CreateStorage creates a new storage account
+func CreateStorage(ctx context.Context, groupName string,
+	storageAccountName string,
+	location string,
+	sku apiv1.StorageSku,
+	kind apiv1.StorageKind,
+	tags map[string]*string,
+	accessTier apiv1.StorageAccessTier,
+	enableHTTPsTrafficOnly *bool) (storage.Account, error) {
+	storagesClient := getStoragesClient()
+
+	storageType := "Microsoft.Storage/storageAccounts"
+	checkAccountParams := storage.AccountCheckNameAvailabilityParameters{Name: &storageAccountName, Type: &storageType}
+	result, err := storagesClient.CheckNameAvailability(ctx, checkAccountParams)
+	if err != nil {
+		return storage.Account{}, err
+	}
+
+	if !*result.NameAvailable {
+		log.Printf("storage account name not available: %v\n", result.Reason)
+		return storage.Account{}, errors.New("storage account not available")
+	}
+
+	sSku := storage.Sku{Name: storage.SkuName(sku.Name)}
+	sKind := storage.Kind(kind)
+	sAccessTier := storage.AccessTier(accessTier)
+
+	params := storage.AccountCreateParameters{
+		Location: to.StringPtr(location),
+		Sku:      &sSku,
+		Kind:     sKind,
+		Tags:     tags,
+		Identity: nil,
+		AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
+			AccessTier:             sAccessTier,
+			EnableHTTPSTrafficOnly: enableHTTPsTrafficOnly,
+		},
+	}
+
+	log.Printf("creating storage '%s' in resource group '%s' and location: %v\n", storageAccountName, groupName, location)
+	future, err := storagesClient.Create(ctx, groupName, storageAccountName, params)
+	if err != nil {
+		return storage.Account{}, err
+	}
+
+	err = future.WaitForCompletionRef(ctx, storagesClient.Client)
+	if err != nil {
+		return storage.Account{}, err
+	}
+
+	return future.Result(storagesClient)
+}
+
+// GetStorage gets the description of the specified storage account.
+// Parameters:
+// resourceGroupName - name of the resource group within the azure subscription.
+// accountName - the name of the storage account
+func GetStorage(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) {
+	storagesClient := getStoragesClient()
+	return storagesClient.GetProperties(ctx, resourceGroupName, accountName, "")
+}
+
+// DeleteStorage deletes the named storage account
+func DeleteStorage(ctx context.Context, groupName string, storageAccountName string) (result autorest.Response, err error) {
+	storagesClient := getStoragesClient()
+	return storagesClient.Delete(ctx, groupName, storageAccountName)
+}
 diff --git a/pkg/resourcemanager/storages/storages_test.go b/pkg/resourcemanager/storages/storages_test.go
new file mode 100644
index 00000000000..d2ab3b27c54
--- /dev/null
+++ b/pkg/resourcemanager/storages/storages_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2019 microsoft.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storages
+
+import (
+	"context"
+	"time"
+
+	apiv1 "github.com/Azure/azure-service-operator/api/v1"
+	"github.com/Azure/azure-service-operator/pkg/helpers"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
+	"github.com/Azure/go-autorest/autorest/to"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
"github.com/onsi/gomega" +) + +var _ = Describe("Storage Account", func() { + + const timeout = time.Second * 180 + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. + + Context("Create and Delete", func() { + It("should create and delete storage account in azure", func() { + + storageAccountName := "tdevsa" + helpers.RandomString(10) + storageLocation := config.DefaultLocation() + + var err error + + _, err = CreateStorage(context.Background(), resourceGroupName, storageAccountName, storageLocation, apiv1.StorageSku{ + Name: "Standard_LRS", + }, "Storage", map[string]*string{}, "", to.BoolPtr(false)) + + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + result, _ := GetStorage(context.Background(), resourceGroupName, storageAccountName) + return result.Response.StatusCode == 200 + }, timeout, + ).Should(BeTrue()) + + _, err = DeleteStorage(context.Background(), resourceGroupName, storageAccountName) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + result, _ := GetStorage(context.Background(), resourceGroupName, storageAccountName) + return result.Response.StatusCode == 404 + }, timeout, + ).Should(BeTrue()) + }) + + }) +}) diff --git a/pkg/resourcemanager/storages/suite_test.go b/pkg/resourcemanager/storages/suite_test.go new file mode 100644 index 00000000000..e77c67d61f3 --- /dev/null +++ b/pkg/resourcemanager/storages/suite_test.go @@ -0,0 +1,78 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storages + +import ( + "github.com/Azure/azure-service-operator/pkg/helpers" + "testing" + + resourcemanagerconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + resoucegroupsresourcemanager "github.com/Azure/azure-service-operator/pkg/resourcemanager/resourcegroups" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "context" + + "k8s.io/client-go/rest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+
+// TODO: consolidate these shared fixtures between this and eventhubs (and other services)
+
+var cfg *rest.Config
+var resourcegroupLocation string
+
+var resourceGroupName = "t-rg-dev-rm-st-" + helpers.RandomString(10)
+
+func TestAPIs(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.Skip("skipping Resource Manager Storage Suite")
+	}
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Storage Suite")
+}
+
+var _ = SynchronizedBeforeSuite(func() []byte {
+	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
+
+	By("bootstrapping test environment")
+
+	resourcemanagerconfig.ParseEnvironment()
+	resourcegroupLocation = resourcemanagerconfig.DefaultLocation()
+
+	//create resourcegroup for this suite
+	result, _ := resoucegroupsresourcemanager.CheckExistence(context.Background(), resourceGroupName)
+	if result.Response.StatusCode != 204 {
+		_, _ = resoucegroupsresourcemanager.CreateGroup(context.Background(), resourceGroupName, resourcegroupLocation)
+	}
+
+	return []byte{}
+}, func(r []byte) {}, 60)
+
+var _ = SynchronizedAfterSuite(func() {}, func() {
+	//clean up the resources created for test
+	By("tearing down the test environment")
+
+	_, _ = resoucegroupsresourcemanager.DeleteGroup(context.Background(), resourceGroupName)
+}, 60)
 diff --git a/pkg/template/assets/cosmosdb.json b/pkg/template/assets/cosmosdb.json
new file mode 100644
index 00000000000..f4feb76cae2
--- /dev/null
+++ b/pkg/template/assets/cosmosdb.json
@@ -0,0 +1,47 @@
+{
+  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "location": {
+      "type": "String"
+    },
+    "kind": {
+      "type": "String"
+    },
+    "properties": {
+      "type": "Object"
+    }
+  },
+  "variables": {
+    "cosmosDBName": "[concat('aso', uniqueString(resourceGroup().id))]"
+  },
+  "resources": [
+    {
+      "type": "Microsoft.DocumentDB/databaseAccounts",
+      "apiVersion": "2015-04-08",
+      "name": "[variables('cosmosDBName')]",
+      "location": "[parameters('location')]",
+      "dependsOn": [],
+      "kind": "[parameters('kind')]",
+      "properties": {
+        "databaseAccountOfferType": "[parameters('properties').databaseAccountOfferType]",
+        "locations": [
+          {
+            "locationName": "[parameters('location')]",
+            "failoverPriority": 0
+          }
+        ]
+      }
+    }
+  ],
+  "outputs": {
+    "cosmosDBName": {
+      "type": "string",
+      "value": "[variables('cosmosDBName')]"
+    },
+    "primaryMasterKey": {
+      "type": "string",
+      "value": "[listKeys(resourceId('Microsoft.DocumentDB/databaseAccounts', variables('cosmosDBName')), '2015-04-08').primaryMasterKey]"
+    }
+  }
+}
 diff --git a/pkg/template/assets/rediscache.json b/pkg/template/assets/rediscache.json
new file mode 100644
index 00000000000..551af6f2ce5
--- /dev/null
+++ b/pkg/template/assets/rediscache.json
@@ -0,0 +1,55 @@
+{
+  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "location": {
+      "type": "string"
+    },
+    "properties.sku.name": {
+      "type": "string"
+    },
+    "properties.sku.family": {
+      "type": "string"
+    },
+    "properties.sku.capacity": {
+      "type": "int"
+    },
+    "properties.enableNonSslPort": {
+      "type": "bool"
+    }
+  },
+  "variables": {
+    "redisCacheName": "[concat('aso', uniqueString(resourceGroup().id))]"
+  },
+  "resources": [
+    {
+      "type": "Microsoft.Cache/Redis",
+      "apiVersion": "2018-03-01",
+      "name": "[variables('redisCacheName')]",
+      "location": "[parameters('location')]",
+      "properties": {
+        "sku": {
+          "name": "[parameters('properties.sku.name')]",
+          "family": 
"[parameters('properties.sku.family')]", + "capacity": "[parameters('properties.sku.capacity')]" + }, + "enableNonSslPort": "[parameters('properties.enableNonSslPort')]", + "redisConfiguration": {} + } + } + ], + "outputs": { + "redisCacheName": { + "type": "string", + "value": "[variables('redisCacheName')]" + }, + "primaryKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Cache/Redis', variables('redisCacheName')), '2018-03-01').primaryKey]" + }, + "secondaryKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Cache/Redis', variables('redisCacheName')), '2018-03-01').secondaryKey]" + } + } +} diff --git a/pkg/template/assets/storage.json b/pkg/template/assets/storage.json new file mode 100644 index 00000000000..893f47ae8e7 --- /dev/null +++ b/pkg/template/assets/storage.json @@ -0,0 +1,63 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "String" + }, + "accountType": { + "type": "String" + }, + "kind": { + "type": "String" + }, + "accessTier": { + "type": "String" + }, + "supportsHttpsTrafficOnly": { + "type": "Bool" + } + }, + "variables": { + "storageAccountName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2018-07-01", + "name": "[variables('storageAccountName')]", + "location": "[parameters('location')]", + "dependsOn": [], + "sku": { + "name": "[parameters('accountType')]" + }, + "kind": "[parameters('kind')]", + "properties": { + "accessTier": "[parameters('accessTier')]", + "supportsHttpsTrafficOnly": "[parameters('supportsHttpsTrafficOnly')]" + } + } + ], + "outputs": { + "storageAccountName": { + "type": "string", + "value": "[variables('storageAccountName')]" + }, + "key1": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[0].value]" + }, + "key2": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[1].value]" + }, + "connectionString1": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[0].value,';EndpointSuffix=core.windows.net')]" + }, + "connectionString2": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[1].value,';EndpointSuffix=core.windows.net')]" + } + } +} diff --git a/pkg/template/templates.go b/pkg/template/templates.go new file mode 100644 index 00000000000..6a9798c4ebc --- /dev/null +++ b/pkg/template/templates.go @@ -0,0 +1,281 @@ +// Code generated by go-bindata. +// sources: +// pkg/template/assets/cosmosdb.json +// pkg/template/assets/rediscache.json +// pkg/template/assets/storage.json +// DO NOT EDIT! 
diff --git a/pkg/template/templates.go b/pkg/template/templates.go
new file mode 100644
index 00000000000..6a9798c4ebc
--- /dev/null
+++ b/pkg/template/templates.go
@@ -0,0 +1,281 @@
+// Code generated by go-bindata.
+// sources:
+// pkg/template/assets/cosmosdb.json
+// pkg/template/assets/rediscache.json
+// pkg/template/assets/storage.json
+// DO NOT EDIT!
+
+package template
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+	if clErr != nil {
+		return nil, clErr // err is always nil here; returning err would silently yield (nil, nil)
+	}
+
+	return buf.Bytes(), nil
+}
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+	return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _cosmosdbJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x54\x4d\x6b\xdb\x40\x10\xbd\xfb\x57\x88\x6d\x41\x12\xc8\xb2\x5c\x5a\x28\xbe\x35\x18\x4a\x09\xa9\x0b\x0d\xbd\x18\x1f\xc6\xab\x71\xb2\xa9\xb4\xb3\xdd\x9d\x35\xa8\xc5\xff\xbd\xc8\xf2\x87\x24\x5b\x49\x88\x7d\x31\x33\xf3\xde\x9b\xd9\x37\xe3\x7f\xa3\x20\x08\x02\xf1\xde\xc9\x47\x2c\x41\xcc\x02\xf1\xc8\x6c\x66\x93\x49\x13\x48\x4b\xd0\xf0\x80\x25\x6a\x4e\xe1\xaf\xb7\x98\x4a\x2a\x0f\x39\x37\xf9\x90\x4d\x3f\x8d\xb3\xe9\x38\x9b\x4e\x72\x34\x05\x55\x75\xdd\x3d\x96\xa6\x00\xc6\xf4\xc9\x91\x7e\x27\x92\x46\x40\x92\x66\xd4\xfc\x0b\xad\x53\xa4\x6b\x9d\x69\x9a\xd5\xdf\x63\x81\x01\x0b\x25\x32\x5a\x27\x66\x41\xd3\xd5\x3e\x5e\x90\x04\x6e\x20\xe7\xe8\x3e\xc3\x95\xc1\x9a\xe8\x27\x5b\xa5\x1f\xc4\x29\xb9\x4b\xce\xe8\xdf\x4a\xe7\x6f\x43\x1a\x4b\x06\x2d\x2b\x74\xc3\xf8\xc5\xfa\x09\x25\xb7\xf0\xa3\x16\x8b\xd8\x82\x55\xb0\x2e\x7a\x04\x42\x92\x2b\xc9\xcd\x6f\xbe\x43\xb9\x27\x59\x4a\xd2\x12\x38\x0a\xc1\x51\x98\x04\x5e\xab\x3f\x1e\x9b\xce\x22\x8b\x8e\xbc\x95\xf8\xd5\x92\x37\x51\x9c\xaa\x3c\x8e\x57\xa2\xa3\x72\x2c\xa9\x55\x96\x27\x95\x81\x86\xef\x94\xb4\xe4\x68\xc3\xe9\x9c\xa4\xaf\xdd\x9a\xdf\x4c\x72\x60\x58\x83\xc3\x2f\x52\x92\xd7\xec\x44\xd2\x05\x83\x51\x2d\xdb\x1a\xcf\x3f\x8e\xb3\xcf\xfd\x3a\x7d\x1c\xe8\x34\x78\x14\xb6\x87\x0d\xe3\x55\x1f\xd2\x32\x57\x2c\xcf\x1b\x10\x85\xc7\xc4\x15\x4c\x8e\x06\x75\xee\x16\x35\x68\xb9\xea\x25\x0f\x7e\x77\xc9\xea\xe0\x15\xa2\x67\x1c\x6e\x84\xba\xef\xb2\xd8\x6c\xd0\xde\x1f\x1e\xb2\xc3\x7f\x26\x0a\xe3\x74\x08\xd5\x97\xef\xcc\xdf\x35\xaf\xfd\xb9\xec\xeb\x02\x7c\x5a\xa4\xd7\x3d\x60\x87\x63\x03\xaa\xa0\x2d\xda\x1f\x56\x91\x55\x5c\x89\x59\x90\x5d\xad\xde\x5d\x44\x57\xa3\xeb\xf9\xe6\xd7\xc1\x18\x41\x9e\x8d\xe7\xe7\x4f\x60\x60\x59\x5d\x73\x9d\x3d\xd7\xb6\x50\xf8\x17\xd7\x6c\xe0\xa2\x55\x09\xb6\xba\x03\xc7\x68\x6f\xb1\x7a\xbb\x72\xa1\x1c\xdf\x62\xe5\x4e\xf7\xf9\x2d\x8f\xc2\x57\xdd\x56\x98\x04\x83\x6d\xc7\x49\x10\x9e\xaf\x2b\x8c\xd3\x7e\xc3\xab\x8b\x7f\x9a\xd1\x6e\xf4\x3f\x00\x00\xff\xff\x6e\x83\x09\x2a\xc1\x05\x00\x00")
+
+func cosmosdbJsonBytes() ([]byte, error) {
+	return bindataRead(
+		_cosmosdbJson,
+		"cosmosdb.json",
+	)
+}
+
+func cosmosdbJson() (*asset, error) {
+	bytes, err := cosmosdbJsonBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "cosmosdb.json", size: 1473, mode: os.FileMode(420), modTime: time.Unix(1563946123, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _rediscacheJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x8f\xd3\x30\x10\xbd\xf7\x57\x44\x06\x29\x89\x94\xba\xc9\x22\x24\xd4\x2b\x07\x84\x56\xac\x10\x8b\xb8\x54\x3d\xcc\xba\xd3\xd6\x6c\xe2\x31\xfe\x58\xa9\xa0\xfe\x77\x94\xa6\xf9\x68\x9a\xa4\x1c\xf6\x40\x7b\xf3\x3c\xbf\xf7\x26\xf3\xc6\x7f\x66\x41\x10\x04\xec\xad\x15\x7b\x2c\x80\x2d\x03\xb6\x77\x4e\x2f\x17\x8b\xea\x80\x17\xa0\x60\x87\x05\x2a\xc7\xe1\xb7\x37\xc8\x05\x15\xe7\x9a\x5d\xdc\xa5\xd9\xfb\x79\x9a\xcd\xd3\x6c\xb1\x41\x9d\xd3\xa1\xc4\x7d\xc7\x42\xe7\xe0\x90\xff\xb4\xa4\xde\xb0\xa4\x12\x10\xa4\x1c\x2a\xf7\x03\x8d\x95\xa4\x4a\x9d\x8c\xa7\xe5\xbf\x06\x68\x30\x50\xa0\x43\x63\xd9\x32\xa8\x5c\x9d\xce\x73\x12\xe0\xaa\x2b\xed\xe9\xa9\xe2\x0e\x1a\x4b\x22\xeb\x8c\x54\x3b\xd6\x14\x8f\x49\x7b\x5b\x1b\xd2\x68\x9c\x44\xcb\xed\xb3\xe7\x0a\x0a\x7c\x15\xa2\x2d\x14\x32\x3f\xbc\x0a\x95\x00\x0d\x42\xba\x09\x32\xa9\xdc\x4d\x26\x54\xf0\x94\xe3\x03\xa9\x47\x9b\x7f\x25\xe3\xc6\xd9\x9e\x88\xf2\x0e\xdd\xac\x43\xca\x5e\xc0\xc8\x92\xa7\x37\x03\x83\x1b\x69\x3f\x82\xd8\xe3\x43\xf5\x01\xd9\x4a\x90\x12\xe0\xa2\x10\x2c\x85\x49\xe0\x95\xfc\xe5\xf1\xf1\xd4\x74\x64\xd0\x92\x37\x02\x3f\x19\xf2\x3a\x8a\xb9\xdc\xc4\xf1\x9a\x5d\xe8\xd4\x90\x52\x67\xd5\xe8\x8c\x18\xfe\x22\x85\x21\x4b\x5b\xc7\x4f\x16\x16\xdf\x4a\x37\x2c\xb9\x04\x83\x96\x9d\x6c\xdd\xa5\xd9\x87\x79\xfa\x6e\x9e\x66\x7d\x9c\xaa\x1b\x68\x5a\x8d\xc2\xcb\xf6\xc2\x78\xdd\xbf\xd4\xc9\x20\x5b\xb5\x41\x8d\xc2\xba\x30\x70\xa7\x9d\xcd\xd5\x28\x4e\x75\xfb\xec\x07\x0b\x97\x2e\xbb\x62\x03\x59\xbe\xd6\x6d\x28\x9a\x84\x4e\x92\x54\xa8\x09\x9a\x4e\x3a\x27\x89\x6a\x5c\x58\x8f\xba\xfb\x3b\x5e\x93\xb3\x81\xc0\x8e\x0a\xf4\xb1\xc3\x7e\xcf\x31\x25\xb5\x95\x3b\x6f\x9a\x47\xe3\x78\x01\x3c\xf6\x92\xbf\x3e\x27\x92\xbc\xd3\xde\xdd\xca\xfd\xf4\xb6\xf7\x12\xf0\x02\xb9\xff\x87\xac\x8d\x6c\xb6\x2c\xc0\x1c\xee\xf1\xf6\x1b\x33\xaa\x9a\x4b\xeb\xee\xf1\x60\x9b\x85\xfc\xbc\x89\xc2\xc1\x65\x0a\x93\x60\xc2\x62\x9c\x04\x61\xbb\x50\x61\xcc\x5b\x73\x23\xee\x2d\x0a\x52\x9b\xff\xd6\x7f\xd7\xde\xfa\xea\x29\x9c\x1d\x67\x7f\x03\x00\x00\xff\xff\xa7\x3e\x2d\x81\x15\x07\x00\x00")
+
+func rediscacheJsonBytes() ([]byte, error) {
+	return bindataRead(
+		_rediscacheJson,
+		"rediscache.json",
+	)
+}
+
+func rediscacheJson() (*asset, error) {
+	bytes, err := rediscacheJsonBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "rediscache.json", size: 1813, mode: os.FileMode(420), modTime: time.Unix(1563946509, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _storageJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x55\x4f\x6f\xd3\x4e\x10\xbd\xe7\x53\x58\xfe\xfd\xa4\x4d\x24\xc7\xb1\x2b\x21\x50\xab\x1c\x40\x20\x40\x15\x14\xa9\x11\x97\x28\x87\x65\x3d\x69\x97\xda\x3b\xcb\xce\x6e\x23\x83\xf2\xdd\x91\xe3\xd4\x8e\x93\x9a\x54\xe9\x01\xc1\xcd\xf3\xef\xbd\xe7\x37\xb3\xfe\x39\x08\x82\x20\x08\xff\x4f\xea\x1a\x0a\x19\x9e\x06\xe1\x35\xb3\x3d\x9d\x4c\xea\x83\xb8\x90\x46\x5e\x41\x01\x86\x63\xf9\xc3\x3b\x88\x15\x16\x0f\x31\x9a\x9c\x24\xe9\xb3\x71\x92\x8e\x93\x74\x92\x81\xcd\xb1\xac\xf2\x66\x50\xd8\x5c\x32\xc4\x5f\x09\xcd\x7f\x61\x54\x0f\x50\x68\x18\x0c\x7f\x06\x47\x1a\x4d\x35\x27\x8d\x93\xea\x6b\x12\xac\x74\xb2\x00\x06\x47\xe1\x69\x50\xa3\x5a\x9d\xe7\xa8\x24\xd7\x25\x9b\xd3\x55\x84\x4b\x0b\x55\xa3\x4b\x76\xda\x5c\x85\xeb\xe0\x7d\xb4\xa9\x96\x4a\xa1\x37\x3c\xab\x53\x8f\x68\x70\xa3\x4d\x76\xf4\x68\x20\x9a\x69\x70\xc7\xd5\x93\xb7\x16\x1d\xd3\x3b\x66\x4b\x33\x27\x97\x4b\xad\x2e\x4c\x5e\xf6\x77\x7b\x85\x98\xb7\x7a\x0d\x5a\x1d\xc3\x5b\xe9\xb4\xfc\x92\x43\x47\x5d\x62\x74\xf2\x0a\x5e\xd6\x32\x7d\x94\xc5\xaa\xd1\x5c\xa1\x51\x92\x87\x42\x12\x8a\x28\xf0\x46\x7f\xf3\x50\x63\x1d\x3a\x20\xf4\x4e\xc1\x5b\x87\xde\x0e\x47\xb1\xce\x46\xa3\x45\xb8\x35\xab\x49\xa9\x66\xcd\xd7\xb3\x7a\x40\x7f\xd0\xca\x21\xe1\x92\xe3\xcb\x1a\xcb\x64\x1b\x13\x85\xd1\x76\xa1\xb4\xba\xe5\xa2\x93\x24\x7d\x31\x4e\x9e\x8f\x93\xb4\x9b\x67\x1a\x32\x6b\xea\x43\xb1\x4b\x57\x8c\x16\xdd\xc2\x96\xe3\xc2\xf9\xc6\x96\x43\xd1\x04\xf6\xd4\x64\x60\xc1\x64\x74\x51\x15\xcd\x17\x9d\x20\xdd\xf8\x9d\x9f\xb6\x0d\xb1\x3d\xa5\x65\x59\xd1\x28\xbb\xc7\x1f\x41\xcb\x9d\xdb\x0d\xaa\xc3\x3d\x10\xad\x43\x0b\x8e\x75\xc7\x02\x1b\x5d\xdb\x7e\xdd\x81\xf4\x10\xda\xed\x1b\x1c\xf0\xea\x76\xa7\xbe\xc4\x5d\xa6\x1d\x1f\x3f\x68\x1a\xa2\x67\xeb\xf9\x31\x2e\xee\xf1\x1b\xd5\x2b\xd7\x11\xe7\x56\xe6\xfe\x91\x6e\xd9\x7f\x4d\x40\x99\x1e\x3f\x33\xd7\xc4\xe7\x50\xd2\x7a\xb9\xde\x67\x43\x71\x70\x31\x44\x14\x1c\x00\x3b\x8a\x02\xb1\x59\x10\x31\x8a\x6f\xa0\xa4\x79\xb2\x88\x57\xa3\xfb\xa9\x9c\xfc\x2d\x54\xd2\xdf\x53\x51\x68\x0c\xa8\x6a\x65\xeb\xcb\xeb\x09\xbf\xa8\xb9\x11\x5f\xc3\x52\xfa\x9c\xdf\x98\xcc\xa2\x36\x4c\x9f\x1c\x32\x2a\xcc\xa7\xd5\xb3\x49\x67\x2d\xcc\x53\x11\x1d\xe0\x14\x89\x26\xff\x1c\xca\xa9\x88\xfe\x8c\x0d\x22\x71\xd6\xb0\xb9\xf4\xcb\xa5\xfe\x3e\x55\xe8\x20\xbe\xd3\x26\xc3\x3b\x8a\x0d\x70\xaf\xeb\xbb\xfa\x3e\xc1\x37\xff\x9e\xbe\xe9\x91\xfa\xd6\x4f\xe9\xe0\x7e\xf0\x2b\x00\x00\x00\xff\xff\xa5\xf7\x8d\x6d\x9b\x09\x00\x00")
+
+func storageJsonBytes() ([]byte, error) {
+	return bindataRead(
+		_storageJson,
+		"storage.json",
+	)
+}
+
+func storageJson() (*asset, error) {
+	bytes, err := storageJsonBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "storage.json", size: 2459, mode: os.FileMode(420), modTime: time.Unix(1563946123, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
+		}
+		return a.bytes, nil
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+	a, err := Asset(name)
+	if err != nil {
+		panic("asset: Asset(" + name + "): " + err.Error())
+	}
+
+	return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
+		}
+		return a.info, nil
+	}
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+	"cosmosdb.json":   cosmosdbJson,
+	"rediscache.json": rediscacheJson,
+	"storage.json":    storageJson,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+//     data/
+//       foo.txt
+//       img/
+//         a.png
+//         b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(cannonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for childName := range node.Children {
+		rv = append(rv, childName)
+	}
+	return rv, nil
+}
+
+type bintree struct {
+	Func     func() (*asset, error)
+	Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+	"cosmosdb.json":   &bintree{cosmosdbJson, map[string]*bintree{}},
+	"rediscache.json": &bintree{rediscacheJson, map[string]*bintree{}},
+	"storage.json":    &bintree{storageJson, map[string]*bintree{}},
+}}
+
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+	data, err := Asset(name)
+	if err != nil {
+		return err
+	}
+	info, err := AssetInfo(name)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+	if err != nil {
+		return err
+	}
+	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+	children, err := AssetDir(name)
+	// File
+	if err != nil {
+		return RestoreAsset(dir, name)
+	}
+	// Dir
+	for _, child := range children {
+		err = RestoreAssets(dir, filepath.Join(name, child))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _filePath(dir, name string) string {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+}
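Note: the generated accessors above give the operator compiled-in copies of the three templates. A minimal usage sketch, assuming the pkg/template import path shown below (the output directory is arbitrary):

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-service-operator/pkg/template" // assumed import path
)

func main() {
	// AssetNames lists the embedded files; order follows map iteration,
	// so it is not sorted.
	for _, name := range template.AssetNames() {
		fmt.Println(name)
	}

	// Asset gunzips and returns a file's contents; MustAsset is the
	// panicking variant suited to package-level initialization.
	data, err := template.Asset("rediscache.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rediscache.json: %d bytes\n", len(data))

	// RestoreAsset writes an embedded file back to disk, e.g. for debugging
	// a template outside the operator binary.
	if err := template.RestoreAsset("/tmp/aso-templates", "storage.json"); err != nil {
		log.Fatal(err)
	}
}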