From e83db894bc6a7393368d3403f9dc5a374b17094b Mon Sep 17 00:00:00 2001 From: Gabriel Saratura Date: Wed, 8 Jan 2025 14:29:11 +0100 Subject: [PATCH] Support postgres major version upgrade --- Makefile | 12 +- apis/stackgres/v1/sgdbops.gen.go | 496 ++++++++- apis/stackgres/v1/sgdbops.yaml | 971 ++++++++---------- apis/stackgres/v1/zz_generated.deepcopy.go | 846 ++++++++++++++- apis/stackgres/v1beta1/sgobjectstorage.gen.go | 20 +- apis/stackgres/v1beta1/sgobjectstorage.yaml | 22 +- .../v1beta1/sgobjectstorage_crd.yaml | 526 +++++----- .../v1beta1/zz_generated.deepcopy.go | 24 +- apis/vshn/v1/dbaas_vshn_postgresql.go | 7 + cmd/maintenance.go | 22 +- crds/vshn.appcat.vshn.io_vshnpostgresqls.yaml | 6 + .../vshn.appcat.vshn.io_xvshnpostgresqls.yaml | 6 + pkg/{maintenance => }/auth/http.go | 3 +- pkg/auth/stackgres/client.go | 121 +++ pkg/auth/stackgres/client_test.go | 78 ++ .../functions/vshnpostgres/delay_cluster.go | 2 +- .../{loadBalancer.go => load_balancer.go} | 0 ...Balancer_test.go => load_balancer_test.go} | 0 .../functions/vshnpostgres/maintenance.go | 4 +- .../vshnpostgres/major_version_upgrade.go | 204 ++++ .../major_version_upgrade_test.go | 72 ++ .../vshnpostgres/postgresql_deploy.go | 57 +- .../vshnpostgres/postgresql_deploy_test.go | 7 +- .../functions/vshnpostgres/register.go | 4 + .../functions/vshnpostgres/restart.go | 8 + pkg/comp-functions/runtime/function_mgr.go | 2 +- pkg/controller/webhooks/postgresql.go | 318 +++--- pkg/controller/webhooks/postgresql_test.go | 143 +++ pkg/maintenance/postgresql.go | 119 +-- pkg/maintenance/postgresql_test.go | 2 + 30 files changed, 2899 insertions(+), 1203 deletions(-) rename pkg/{maintenance => }/auth/http.go (99%) create mode 100644 pkg/auth/stackgres/client.go create mode 100644 pkg/auth/stackgres/client_test.go rename pkg/comp-functions/functions/vshnpostgres/{loadBalancer.go => load_balancer.go} (100%) rename pkg/comp-functions/functions/vshnpostgres/{loadBalancer_test.go => load_balancer_test.go} (100%) create mode 100644 pkg/comp-functions/functions/vshnpostgres/major_version_upgrade.go create mode 100644 pkg/comp-functions/functions/vshnpostgres/major_version_upgrade_test.go diff --git a/Makefile b/Makefile index 2d4aaba2e2..c04d41bc0b 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) BIN_FILENAME ?= $(PROJECT_DIR)/appcat ## Stackgres CRDs -STACKGRES_VERSION ?= 1.4.3 +STACKGRES_VERSION ?= 1.14.2 STACKGRES_CRD_URL ?= https://gitlab.com/ongresinc/stackgres/-/raw/${STACKGRES_VERSION}/stackgres-k8s/src/common/src/main/resources/crds ## BUILD:go @@ -92,11 +92,11 @@ generate-stackgres-crds: go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --package=v1 -generate=types -o apis/stackgres/v1/sgdbops.gen.go apis/stackgres/v1/sgdbops.yaml perl -i -0pe 's/\*struct\s\{\n\s\sAdditionalProperties\smap\[string\]string\s`json:"-"`\n\s}/map\[string\]string/gms' apis/stackgres/v1/sgdbops.gen.go - curl ${STACKGRES_CRD_URL}/SGCluster.yaml?inline=false -o apis/stackgres/v1/sgcluster_crd.yaml - yq -i e apis/stackgres/v1/sgcluster.yaml --expression ".components.schemas.SGClusterSpec=load(\"apis/stackgres/v1/sgcluster_crd.yaml\").spec.versions[0].schema.openAPIV3Schema.properties.spec" - yq -i e apis/stackgres/v1/sgcluster.yaml --expression ".components.schemas.SGClusterStatus=load(\"apis/stackgres/v1/sgcluster_crd.yaml\").spec.versions[0].schema.openAPIV3Schema.properties.status" - go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --package=v1 -generate=types -o 
apis/stackgres/v1/sgcluster.gen.go apis/stackgres/v1/sgcluster.yaml - perl -i -0pe 's/\*struct\s\{\n\s\sAdditionalProperties\smap\[string\]string\s`json:"-"`\n\s}/map\[string\]string/gms' apis/stackgres/v1/sgcluster.gen.go + # curl ${STACKGRES_CRD_URL}/SGCluster.yaml?inline=false -o apis/stackgres/v1/sgcluster_crd.yaml + # yq -i e apis/stackgres/v1/sgcluster.yaml --expression ".components.schemas.SGClusterSpec=load(\"apis/stackgres/v1/sgcluster_crd.yaml\").spec.versions[0].schema.openAPIV3Schema.properties.spec" + # yq -i e apis/stackgres/v1/sgcluster.yaml --expression ".components.schemas.SGClusterStatus=load(\"apis/stackgres/v1/sgcluster_crd.yaml\").spec.versions[0].schema.openAPIV3Schema.properties.status" + # go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --package=v1 -generate=types -o apis/stackgres/v1/sgcluster.gen.go apis/stackgres/v1/sgcluster.yaml + # perl -i -0pe 's/\*struct\s\{\n\s\sAdditionalProperties\smap\[string\]string\s`json:"-"`\n\s}/map\[string\]string/gms' apis/stackgres/v1/sgcluster.gen.go # The generator for the pool config CRD unfortunately produces a broken result. However if we ever need to regenerate it in the future, please uncomment this. # curl ${STACKGRES_CRD_URL}/SGInstanceProfile.yaml?inline=false -o apis/stackgres/v1/sginstanceprofile_crd.yaml diff --git a/apis/stackgres/v1/sgdbops.gen.go b/apis/stackgres/v1/sgdbops.gen.go index 5462487d46..427a6378e7 100644 --- a/apis/stackgres/v1/sgdbops.gen.go +++ b/apis/stackgres/v1/sgdbops.gen.go @@ -13,7 +13,7 @@ type SGDbOpsSpec struct { // The maximum number of retries the operation is allowed to do after a failure. // - // A value of `0` (zero) means no retries are made. Can not be greater than `10`. Defaults to: `0`. + // A value of `0` (zero) means no retries are made. Defaults to: `0`. MaxRetries *int `json:"maxRetries,omitempty"` // Configuration of minor version upgrade @@ -28,7 +28,6 @@ type SGDbOpsSpec struct { // * `restart`: perform a restart of the cluster. // * `minorVersionUpgrade`: perform a minor version upgrade of PostgreSQL. // * `securityUpgrade`: perform a security upgrade of the cluster. - // * `upgrade`: perform a operator API upgrade of the cluster Op string `json:"op"` // Configuration of [`pg_repack`](https://github.com/reorg/pg_repack) command @@ -70,41 +69,299 @@ type SGDbOpsSpecBenchmark struct { // * `replicas-service`: Connect to the replicas service ConnectionType *string `json:"connectionType,omitempty"` + // The credentials of the user that will be used by the benchmark + Credentials *SGDbOpsSpecBenchmarkCredentials `json:"credentials,omitempty"` + + // When specified, indicates the database upon which the benchmark will run. + // + // If not specified, a target database with a random name will be created and removed after the benchmark completes. + Database *string `json:"database,omitempty"` + // Configuration of [pgbench](https://www.postgresql.org/docs/current/pgbench.html) benchmark Pgbench *SGDbOpsSpecBenchmarkPgbench `json:"pgbench,omitempty"` + // Configuration of sampling benchmark. + Sampling *SGDbOpsSpecBenchmarkSampling `json:"sampling,omitempty"` + // The type of benchmark that will be performed on the SGCluster. Available benchmarks are: // // * `pgbench`: run [pgbench](https://www.postgresql.org/docs/current/pgbench.html) on the specified SGCluster and report the results in the status. + // * `sampling`: samples real queries and stores them in the SGDbOps status in order to be used by a `pgbench` benchmark using `replay` mode. 
Type string `json:"type"` } +// SGDbOpsSpecBenchmarkCredentials defines model for SGDbOpsSpecBenchmarkCredentials. +type SGDbOpsSpecBenchmarkCredentials struct { + // The password that will be used by the benchmark + // + // If not specified the default superuser password will be used. + Password SGDbOpsSpecBenchmarkCredentialsPassword `json:"password"` + + // The username that will be used by the benchmark. + // + // If not specified the default superuser username (by default postgres) will be used. + Username SGDbOpsSpecBenchmarkCredentialsUsername `json:"username"` +} + +// SGDbOpsSpecBenchmarkCredentialsPassword defines model for SGDbOpsSpecBenchmarkCredentialsPassword. +type SGDbOpsSpecBenchmarkCredentialsPassword struct { + // The Secret key where the password is stored. + Key string `json:"key"` + + // The Secret name where the password is stored. + Name string `json:"name"` +} + +// SGDbOpsSpecBenchmarkCredentialsUsername defines model for SGDbOpsSpecBenchmarkCredentialsUsername. +type SGDbOpsSpecBenchmarkCredentialsUsername struct { + // The Secret key where the username is stored. + Key string `json:"key"` + + // The Secret name where the username is stored. + Name string `json:"name"` +} + // SGDbOpsSpecBenchmarkPgbench defines model for SGDbOpsSpecBenchmarkPgbench. type SGDbOpsSpecBenchmarkPgbench struct { // Number of clients simulated, that is, number of concurrent database sessions. Defaults to: `1`. ConcurrentClients *int `json:"concurrentClients,omitempty"` + // This section allows configuring custom SQL for initialization and the scripts used by pgbench. + Custom *SGDbOpsSpecBenchmarkPgbenchCustom `json:"custom,omitempty"` + // Size of the database to generate. This size is specified either in Mebibytes, Gibibytes or Tebibytes (multiples of 2^20, 2^30 or 2^40, respectively). DatabaseSize string `json:"databaseSize"` // An ISO 8601 duration in the format `PnDTnHnMn.nS`, that specifies how long the benchmark will run. Duration string `json:"duration"` + // Create the pgbench_accounts, pgbench_tellers and pgbench_branches tables with the given fillfactor. Default is 100. + Fillfactor *int `json:"fillfactor,omitempty"` + + // Create foreign key constraints between the standard tables. (This option only takes effect if `custom.initialization` is not specified). + ForeignKeys *bool `json:"foreignKeys,omitempty"` + + // Perform just a selected set of the normal initialization steps. init_steps specifies the initialization steps to be performed, using one character per step. Each step is invoked in the specified order. The default is dtgvp. The available steps are: + // + // * `d` (Drop): Drop any existing pgbench tables. + // * `t` (create Tables): Create the tables used by the standard pgbench scenario, namely pgbench_accounts, pgbench_branches, pgbench_history, and pgbench_tellers. + // * `g` or `G` (Generate data, client-side or server-side): Generate data and load it into the standard tables, replacing any data already present. + // With `g` (client-side data generation), data is generated in pgbench client and then sent to the server. This uses the client/server bandwidth extensively through a COPY. pgbench uses the FREEZE option with version 14 or later of PostgreSQL to speed up subsequent VACUUM, unless partitions are enabled. Using g causes logging to print one message every 100,000 rows while generating data for the pgbench_accounts table. 
+ // With `G` (server-side data generation), only small queries are sent from the pgbench client and then data is actually generated in the server. No significant bandwidth is required for this variant, but the server will do more work. Using G causes logging not to print any progress message while generating data. + // The default initialization behavior uses client-side data generation (equivalent to g). + // * `v` (Vacuum): Invoke VACUUM on the standard tables. + // * `p` (create Primary keys): Create primary key indexes on the standard tables. + // * `f` (create Foreign keys): Create foreign key constraints between the standard tables. (Note that this step is not performed by default.) + InitSteps *string `json:"initSteps,omitempty"` + + // The pgbench benchmark type: + // + // * `tpcb-like`: The benchmark is inspired by the [TPC-B benchmark](https://www.tpc.org/TPC_Documents_Latest_Versions/TPC-B_v2.0.0.pdf). It is the default mode when `connectionType` is set to `primary-service`. + // * `select-only`: Like `tpcb-like`, but only using SELECT commands. It is the default mode when `connectionType` is set to `replicas-service`. + // * `custom`: will use the scripts in the `custom` section to initialize and run commands for the benchmark. + // * `replay`: will replay the sampled queries of a sampling benchmark SGDbOps. If the `custom` section is specified it will be used instead. Queries can be referenced setting `custom.scripts.replay` to the index of the query in the sampling benchmark SGDbOps's status (index starts from 0). + // + // See also https://www.postgresql.org/docs/current/pgbench.html#TRANSACTIONS-AND-SCRIPTS + Mode *string `json:"mode,omitempty"` + + // Perform no vacuuming during initialization. (This option suppresses the `v` initialization step, even if it was specified in `initSteps`.) + NoVacuum *bool `json:"noVacuum,omitempty"` + + // Create a partitioned pgbench_accounts table with the specified method. Expected values are `range` or `hash`. This option requires that `partitions` is set to non-zero. If unspecified, default is `range`. (This option only takes effect if `custom.initialization` is not specified). + PartitionMethod *string `json:"partitionMethod,omitempty"` + + // Create a partitioned pgbench_accounts table with the specified number of partitions of nearly equal size for the scaled number of accounts. Default is 0, meaning no partitioning. (This option only takes effect if `custom.initialization` is not specified). + Partitions *int `json:"partitions,omitempty"` + + // Protocol to use for submitting queries to the server: + // + // * `simple`: use simple query protocol. + // * `extended`: use extended query protocol. + // * `prepared`: use extended query protocol with prepared statements. + // + // In the prepared mode, pgbench reuses the parse analysis result starting from the second query iteration, so pgbench runs faster than in other modes. + // + // The default is `simple` query protocol. See also https://www.postgresql.org/docs/current/protocol.html + QueryMode *string `json:"queryMode,omitempty"` + + // Sampling rate, used when collecting data, to reduce the amount of collected data. If this option is given, only the specified fraction of transactions are collected. 1.0 means all transactions will be logged, 0.05 means only 5% of the transactions will be logged. + SamplingRate *float32 `json:"samplingRate,omitempty"` + + // The benchmark SGDbOps of type sampling that will be used to replay sampled queries. 
+ SamplingSGDbOps *string `json:"samplingSGDbOps,omitempty"` + // Number of worker threads within pgbench. Using more than one thread can be helpful on multi-CPU machines. Clients are distributed as evenly as possible among available threads. Default is `1`. Threads *int `json:"threads,omitempty"` + // Create all tables as unlogged tables, rather than permanent tables. (This option only takes effect if `custom.initialization` is not specified). + UnloggedTables *bool `json:"unloggedTables,omitempty"` + + // **Deprecated** this field is ignored, use `queryMode` instead. + // // Use extended query protocol with prepared statements. Defaults to: `false`. UsePreparedStatements *bool `json:"usePreparedStatements,omitempty"` } +// SGDbOpsSpecBenchmarkPgbenchCustom defines model for SGDbOpsSpecBenchmarkPgbenchCustom. +type SGDbOpsSpecBenchmarkPgbenchCustom struct { + // The custom SQL for initialization that will be executed in place of pgbench default initialization. + // + // If not specified the default pgbench initialization will be performed instead. + Initialization *SGDbOpsSpecBenchmarkPgbenchCustomInitialization `json:"initialization,omitempty"` + + // The custom SQL scripts that will be executed by pgbench during the benchmark instead of the default pgbench scripts + Scripts *[]SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem `json:"scripts,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomInitialization defines model for SGDbOpsSpecBenchmarkPgbenchCustomInitialization. +type SGDbOpsSpecBenchmarkPgbenchCustomInitialization struct { + // Raw SQL script to execute. This field is mutually exclusive with `scriptFrom` field. + Script *string `json:"script,omitempty"` + + // Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that contains the SQL script to execute. This field is mutually exclusive with `script` field. + // + // Fields `secretKeyRef` and `configMapKeyRef` are mutually exclusive, and one of them is required. + ScriptFrom *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom `json:"scriptFrom,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom defines model for SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom. +type SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom struct { + // A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) reference that contains the SQL script to execute. This field is mutually exclusive with `secretKeyRef` field. + ConfigMapKeyRef *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef `json:"configMapKeyRef,omitempty"` + + // A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) that contains the SQL script to execute. This field is mutually exclusive with `configMapKeyRef` field. + SecretKeyRef *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef `json:"secretKeyRef,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef defines model for SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef. +type SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef struct { + // The key name within the ConfigMap that contains the SQL script to execute. + Key *string `json:"key,omitempty"` + + // The name of the ConfigMap that contains the SQL script to execute. 
+ Name *string `json:"name,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef defines model for SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef. +type SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef struct { + // The key of the secret to select from. Must be a valid secret key. + Key *string `json:"key,omitempty"` + + // Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + Name *string `json:"name,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem defines model for SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem. +type SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem struct { + // The name of the builtin script to use. See https://www.postgresql.org/docs/current/pgbench.html#PGBENCH-OPTION-BUILTIN + // + // When specified, fields `replay`, `script` and `scriptFrom` must not be set. + Builtin *string `json:"builtin,omitempty"` + + // The index of the query in the sampling benchmark SGDbOps's status (index starts from 0). + // + // When specified, fields `builtin`, `script` and `scriptFrom` must not be set. + Replay *int `json:"replay,omitempty"` + + // Raw SQL script to execute. This field is mutually exclusive with `scriptFrom` field. + Script *string `json:"script,omitempty"` + + // Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that contains the SQL script to execute. This field is mutually exclusive with `script` field. + + // Fields `secretKeyRef` and `configMapKeyRef` are mutually exclusive, and one of them is required. + ScriptFrom *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom `json:"scriptFrom,omitempty"` + + // The weight of this custom SQL script. + Weight *int `json:"weight,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom defines model for SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom. +type SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom struct { + // A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) reference that contains the SQL script to execute. This field is mutually exclusive with `secretKeyRef` field. + ConfigMapKeyRef *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef `json:"configMapKeyRef,omitempty"` + + // A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) that contains the SQL script to execute. This field is mutually exclusive with `configMapKeyRef` field. + SecretKeyRef *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef `json:"secretKeyRef,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef defines model for SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef. +type SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef struct { + // The key name within the ConfigMap that contains the SQL script to execute. + Key *string `json:"key,omitempty"` + + // The name of the ConfigMap that contains the SQL script to execute. + Name *string `json:"name,omitempty"` +} + +// SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef defines model for SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef. 
+type SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef struct { + // The key of the secret to select from. Must be a valid secret key. + Key *string `json:"key,omitempty"` + + // Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + Name *string `json:"name,omitempty"` +} + +// SGDbOpsSpecBenchmarkSampling defines model for SGDbOpsSpecBenchmarkSampling. +type SGDbOpsSpecBenchmarkSampling struct { + // The query used to select top queries. Will be ignored if `mode` is not set to `custom`. + // + // The query must return at most 2 columns: + // + // * First column returned by the query must be a column holding the query identifier, also available in pg_stat_activity (column `query_id`) and pg_stat_statements (column `queryid`). + // * Second column is optional and, if returned, must hold a JSON object containing only text keys and values that will be used to generate the stats. + // + // See also: + // + // * https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW + // * https://www.postgresql.org/docs/current/pgstatstatements.html#PGSTATSTATEMENTS-PG-STAT-STATEMENTS + CustomTopQueriesQuery *string `json:"customTopQueriesQuery,omitempty"` + + // The mode used to select the top queries used for sampling: + // + // * `time`: The top queries will be selected among the slowest queries. + // * `calls`: The top queries will be selected among the most called queries. + // * `custom`: The `customTopQueriesQuery` will be used to select top queries. + Mode *string `json:"mode,omitempty"` + + // When `true`, the top queries stats are omitted from the SGDbOps status. By default `false`. + OmitTopQueriesInStatus *bool `json:"omitTopQueriesInStatus,omitempty"` + + // Number of sampled queries to include in the result. By default `10`. + Queries *int `json:"queries,omitempty"` + + // An ISO 8601 duration in the format `PnDTnHnMn.nS`, that specifies how long the sampling of real queries (to be replayed later) will last. + SamplingDuration string `json:"samplingDuration"` + + // Minimum number of microseconds the sampler will wait between samples. By default `10000` (10 milliseconds). + SamplingMinInterval *int `json:"samplingMinInterval,omitempty"` + + // The target database to be sampled. By default `postgres`. + // + // The benchmark database will be used to store the sampled queries but the user must specify a target database to be sampled in the `sampling` section. + TargetDatabase string `json:"targetDatabase"` + + // An ISO 8601 duration in the format `PnDTnHnMn.nS`, that specifies how long to wait before selecting top queries in order to collect enough stats. + TopQueriesCollectDuration string `json:"topQueriesCollectDuration"` + + // Regular expression for filtering representative statements when selecting top queries. Will be ignored if `mode` is set to `custom`. The default is `^ *(with|select) `. See https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-POSIX-REGEXP + TopQueriesFilter *string `json:"topQueriesFilter,omitempty"` + + // Minimum number of queries to consider as part of the top queries. By default `5`. + TopQueriesMin *int `json:"topQueriesMin,omitempty"` + + // Percentile of queries to consider as part of the top queries. Will be ignored if `mode` is set to `custom`. By default `95`. 
+ TopQueriesPercentile *int `json:"topQueriesPercentile,omitempty"` +} + // SGDbOpsSpecMajorVersionUpgrade defines model for SGDbOpsSpecMajorVersionUpgrade. type SGDbOpsSpecMajorVersionUpgrade struct { // The path where the backup is stored. If not set this field is filled in by the operator. // // When provided it will indicate where the backups and WAL files will be stored. // - // The path should be different from the current `.spec.configurations.backupPath` value for the target `SGCluster` - // in order to avoid mixing WAL files of two distinct major versions of postgres. + // The path should be different from the current `.spec.configurations.backups[].path` value for the target `SGCluster` + // in order to avoid mixing WAL files of two distinct major versions of postgres. BackupPath *string `json:"backupPath,omitempty"` // If true does some checks to see if the cluster can perform a major version upgrade without changing any data. Defaults to: `false`. @@ -112,21 +369,74 @@ type SGDbOpsSpecMajorVersionUpgrade struct { // If true use efficient file cloning (also known as "reflinks" on some systems) instead of copying files to the new cluster. // This can result in near-instantaneous copying of the data files, giving the speed advantages of `link` while leaving the old - // cluster untouched. This option is mutually exclusive with `link`. Defaults to: `false`. + // cluster untouched. This option is mutually exclusive with `link`. Defaults to: `false`. // // File cloning is only supported on some operating systems and file systems. If it is selected but not supported, the pg_upgrade - // run will error. At present, it is supported on Linux (kernel 4.5 or later) with Btrfs and XFS (on file systems created with - // reflink support), and on macOS with APFS. + // run will error. At present, it is supported on Linux (kernel 4.5 or later) with Btrfs and XFS (on file systems created with - // reflink support), and on macOS with APFS. Clone *bool `json:"clone,omitempty"` // If true use hard links instead of copying files to the new cluster. This option is mutually exclusive with `clone`. Defaults to: `false`. Link *bool `json:"link,omitempty"` + // A major version upgrade cannot be performed if a required extension is not present for the target major version of the upgrade. + // In those cases you will have to provide the version of the extension for the target major version of postgres. + // Beware that in some cases it is not possible to upgrade an extension alongside postgres. This is the case for PostGIS or timescaledb. + // In such cases you will have to upgrade the extension before or after the major version upgrade. Please make sure you read the + // documentation of each extension in order to understand if it is possible to upgrade it during a major version upgrade of postgres. + PostgresExtensions *[]SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem `json:"postgresExtensions,omitempty"` + // The target postgres version that must have the same major version of the target SGCluster. PostgresVersion *string `json:"postgresVersion,omitempty"` // The postgres config that must have the same major version of the target postgres version. SgPostgresConfig *string `json:"sgPostgresConfig,omitempty"` + + // The list of Postgres extensions to install. 
+ // + // **This section is filled by the operator.** + ToInstallPostgresExtensions *[]SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem `json:"toInstallPostgresExtensions,omitempty"` +} + +// SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem defines model for SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem. +type SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem struct { + // The name of the extension to deploy. + Name string `json:"name"` + + // The id of the publisher of the extension to deploy. If not specified, `com.ongres` will be used by default. + Publisher *string `json:"publisher,omitempty"` + + // The repository base URL from where to obtain the extension to deploy. + // + // **This section is filled by the operator.** + Repository *string `json:"repository,omitempty"` + + // The version of the extension to deploy. If not specified, the version of the `stable` channel will be used by default, and if only one version is available, that one will be used. + Version *string `json:"version,omitempty"` +} + +// SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem defines model for SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem. +type SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem struct { + // The build version of the extension to install. + Build *string `json:"build,omitempty"` + + // The extra mounts of the extension to install. + ExtraMounts *[]string `json:"extraMounts,omitempty"` + + // The name of the extension to install. + Name string `json:"name"` + + // The postgres major version of the extension to install. + PostgresVersion string `json:"postgresVersion"` + + // The id of the publisher of the extension to install. + Publisher string `json:"publisher"` + + // The repository base URL from which the extension will be installed. + Repository string `json:"repository"` + + // The version of the extension to install. + Version string `json:"version"` } // SGDbOpsSpecMinorVersionUpgrade defines model for SGDbOpsSpecMinorVersionUpgrade. @@ -198,7 +508,7 @@ type SGDbOpsSpecRestart struct { Method *string `json:"method,omitempty"` // By default all Pods are restarted. Setting this option to `true` allows restarting only those Pods which - // are in pending restart state as detected by the operation. Defaults to: `false`. + // are in pending restart state as detected by the operation. Defaults to: `false`. OnlyPendingRestart *bool `json:"onlyPendingRestart,omitempty"` } @@ -206,7 +516,7 @@ type SGDbOpsSpecScheduling struct { // Node affinity is a group of node affinity scheduling rules. // - // See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#nodeaffinity-v1-core + // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core NodeAffinity *SGDbOpsSpecSchedulingNodeAffinity `json:"nodeAffinity,omitempty"` // NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ @@ -214,17 +524,20 @@ // Pod affinity is a group of inter pod affinity scheduling rules. 
// - // See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#podaffinity-v1-core + // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core PodAffinity *SGDbOpsSpecSchedulingPodAffinity `json:"podAffinity,omitempty"` // Pod anti affinity is a group of inter pod anti affinity scheduling rules. // - // See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#podantiaffinity-v1-core + // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core PodAntiAffinity *SGDbOpsSpecSchedulingPodAntiAffinity `json:"podAntiAffinity,omitempty"` + // If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + PriorityClassName *string `json:"priorityClassName,omitempty"` + // If specified, the pod's tolerations. // - // See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core + // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core Tolerations *[]SGDbOpsSpecSchedulingTolerationsItem `json:"tolerations,omitempty"` } @@ -261,7 +574,6 @@ type SGDbOpsSpecSchedulingNodeAffinityPreferredDuringSchedulingIgnoredDuringExec Key string `json:"key"` // Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - // Operator string `json:"operator"` // An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. @@ -274,7 +586,6 @@ type SGDbOpsSpecSchedulingNodeAffinityPreferredDuringSchedulingIgnoredDuringExec Key string `json:"key"` // Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - // Operator string `json:"operator"` // An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. @@ -302,7 +613,6 @@ type SGDbOpsSpecSchedulingNodeAffinityRequiredDuringSchedulingIgnoredDuringExecu Key string `json:"key"` // Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - // Operator string `json:"operator"` // An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. @@ -315,7 +625,6 @@ type SGDbOpsSpecSchedulingNodeAffinityRequiredDuringSchedulingIgnoredDuringExecu Key string `json:"key"` // Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
- // Operator string `json:"operator"` // An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. @@ -345,6 +654,12 @@ type SGDbOpsSpecSchedulingPodAffinityPreferredDuringSchedulingIgnoredDuringExecu // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. LabelSelector *SGDbOpsSpecSchedulingPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermLabelSelector `json:"labelSelector,omitempty"` + // MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MatchLabelKeys *[]string `json:"matchLabelKeys,omitempty"` + + // MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MismatchLabelKeys *[]string `json:"mismatchLabelKeys,omitempty"` + // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. NamespaceSelector *SGDbOpsSpecSchedulingPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermNamespaceSelector `json:"namespaceSelector,omitempty"` @@ -402,6 +717,12 @@ type SGDbOpsSpecSchedulingPodAffinityRequiredDuringSchedulingIgnoredDuringExecut // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. LabelSelector *SGDbOpsSpecSchedulingPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemLabelSelector `json:"labelSelector,omitempty"` + // MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MatchLabelKeys *[]string `json:"matchLabelKeys,omitempty"` + + // MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MismatchLabelKeys *[]string `json:"mismatchLabelKeys,omitempty"` + // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. NamespaceSelector *SGDbOpsSpecSchedulingPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemNamespaceSelector `json:"namespaceSelector,omitempty"` @@ -477,6 +798,12 @@ type SGDbOpsSpecSchedulingPodAntiAffinityPreferredDuringSchedulingIgnoredDuringE // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. LabelSelector *SGDbOpsSpecSchedulingPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermLabelSelector `json:"labelSelector,omitempty"` + // MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MatchLabelKeys *[]string `json:"matchLabelKeys,omitempty"` + + // MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MismatchLabelKeys *[]string `json:"mismatchLabelKeys,omitempty"` + // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. NamespaceSelector *SGDbOpsSpecSchedulingPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermNamespaceSelector `json:"namespaceSelector,omitempty"` @@ -534,6 +861,12 @@ type SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDuringEx // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. LabelSelector *SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemLabelSelector `json:"labelSelector,omitempty"` + // MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MatchLabelKeys *[]string `json:"matchLabelKeys,omitempty"` + + // MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + MismatchLabelKeys *[]string `json:"mismatchLabelKeys,omitempty"` + // A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. NamespaceSelector *SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemNamespaceSelector `json:"namespaceSelector,omitempty"` @@ -589,14 +922,12 @@ type SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDuringEx // SGDbOpsSpecSchedulingTolerationsItem defines model for SGDbOpsSpecSchedulingTolerationsItem. type SGDbOpsSpecSchedulingTolerationsItem struct { // Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // Effect *string `json:"effect,omitempty"` // Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. Key *string `json:"key,omitempty"` // Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - // Operator *string `json:"operator,omitempty"` // TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. @@ -627,22 +958,22 @@ type SGDbOpsSpecVacuum struct { Databases *[]SGDbOpsSpecVacuumDatabasesItem `json:"databases,omitempty"` // Normally, VACUUM will skip pages based on the visibility map. Pages where all tuples are known to be frozen can always be - // skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an - // aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid - // waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be - // used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or - // software issue causing database corruption. Defaults to: `false`. + // skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an + // aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid + // waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be + // used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or + // software issue causing database corruption. Defaults to: `false`. DisablePageSkipping *bool `json:"disablePageSkipping,omitempty"` // If true selects aggressive "freezing" of tuples. Specifying FREEZE is equivalent to performing VACUUM with the - // vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed - // when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. + // vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed + // when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. Freeze *bool `json:"freeze,omitempty"` // If true selects "full" vacuum, which can reclaim more space, but takes much longer and exclusively locks the table. // This method also requires extra disk space, since it writes a new copy of the table and doesn't release the old copy - // until the operation is complete. Usually this should only be used when a significant amount of space needs to be - // reclaimed from within the table. Defaults to: `false`. + // until the operation is complete. Usually this should only be used when a significant amount of space needs to be + // reclaimed from within the table. Defaults to: `false`. 
Full *bool `json:"full,omitempty"` } @@ -652,22 +983,22 @@ type SGDbOpsSpecVacuumDatabasesItem struct { Analyze *bool `json:"analyze,omitempty"` // Normally, VACUUM will skip pages based on the visibility map. Pages where all tuples are known to be frozen can always be - // skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an - // aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid - // waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be - // used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or - // software issue causing database corruption. Defaults to: `false`. + // skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an + // aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid + // waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be + // used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or + // software issue causing database corruption. Defaults to: `false`. DisablePageSkipping *bool `json:"disablePageSkipping,omitempty"` // If true selects aggressive "freezing" of tuples. Specifying FREEZE is equivalent to performing VACUUM with the - // vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed - // when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. + // vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed + // when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. Freeze *bool `json:"freeze,omitempty"` // If true selects "full" vacuum, which can reclaim more space, but takes much longer and exclusively locks the table. // This method also requires extra disk space, since it writes a new copy of the table and doesn't release the old copy - // until the operation is complete. Usually this should only be used when a significant amount of space needs to be - // reclaimed from within the table. Defaults to: `false`. + // until the operation is complete. Usually this should only be used when a significant amount of space needs to be + // reclaimed from within the table. Defaults to: `false`. Full *bool `json:"full,omitempty"` // the name of the database @@ -709,16 +1040,25 @@ type SGDbOpsStatus struct { type SGDbOpsStatusBenchmark struct { // The results of the pgbench benchmark Pgbench *SGDbOpsStatusBenchmarkPgbench `json:"pgbench,omitempty"` + + // The results of the sampling benchmark + Sampling *SGDbOpsStatusBenchmarkSampling `json:"sampling,omitempty"` } // SGDbOpsStatusBenchmarkPgbench defines model for SGDbOpsStatusBenchmarkPgbench. type SGDbOpsStatusBenchmarkPgbench struct { + // Compressed and base 64 encoded HdrHistogram + HdrHistogram *string `json:"hdrHistogram,omitempty"` + // The latency results of the pgbench benchmark Latency *SGDbOpsStatusBenchmarkPgbenchLatency `json:"latency,omitempty"` // The scale factor used to run pgbench (`--scale`). 
ScaleFactor *float32 `json:"scaleFactor"` + // Average per-statement latency (execution time from the perspective of the client) of each command after the benchmark finishes + Statements *[]SGDbOpsStatusBenchmarkPgbenchStatementsItem `json:"statements,omitempty"` + // All the transactions per second results of the pgbench benchmark TransactionsPerSecond *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond `json:"transactionsPerSecond,omitempty"` @@ -737,7 +1077,7 @@ type SGDbOpsStatusBenchmarkPgbenchLatency struct { // SGDbOpsStatusBenchmarkPgbenchLatencyAverage defines model for SGDbOpsStatusBenchmarkPgbenchLatencyAverage. type SGDbOpsStatusBenchmarkPgbenchLatencyAverage struct { - // The latency measure unit represented in milliseconds + // The latency measure unit Unit *string `json:"unit,omitempty"` // The latency average value @@ -746,38 +1086,101 @@ type SGDbOpsStatusBenchmarkPgbenchLatencyAverage struct { // SGDbOpsStatusBenchmarkPgbenchLatencyStandardDeviation defines model for SGDbOpsStatusBenchmarkPgbenchLatencyStandardDeviation. type SGDbOpsStatusBenchmarkPgbenchLatencyStandardDeviation struct { - // The latency measure unit represented in milliseconds + // The latency measure unit Unit *string `json:"unit,omitempty"` // The latency standard deviation value Value *float32 `json:"value"` } +// SGDbOpsStatusBenchmarkPgbenchStatementsItem defines model for SGDbOpsStatusBenchmarkPgbenchStatementsItem. +type SGDbOpsStatusBenchmarkPgbenchStatementsItem struct { + // The command + Command *string `json:"command,omitempty"` + + // Average latency of the command + Latency *float32 `json:"latency,omitempty"` + + // The script index (`0` if no custom scripts have been defined) + Script *int `json:"script,omitempty"` + + // The average latency measure unit + Unit *string `json:"unit,omitempty"` +} + // SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond defines model for SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond. type SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond struct { - // Number of Transaction Per Second (tps) excluding connection establishing. + // Number of Transactions Per Second (tps) excluding connection establishing. ExcludingConnectionsEstablishing *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondExcludingConnectionsEstablishing `json:"excludingConnectionsEstablishing,omitempty"` - // Number of Transaction Per Second (tps) including connection establishing. + // Number of Transactions Per Second (tps) including connection establishing. IncludingConnectionsEstablishing *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondIncludingConnectionsEstablishing `json:"includingConnectionsEstablishing,omitempty"` + + // The Transactions Per Second (tps) values aggregated over unit of time + OverTime *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime `json:"overTime,omitempty"` } // SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondExcludingConnectionsEstablishing defines model for SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondExcludingConnectionsEstablishing. 
type SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondExcludingConnectionsEstablishing struct { - // Transaction Per Second (tps) measure + // Transactions Per Second (tps) measure unit Unit *string `json:"unit,omitempty"` - // The Transaction Per Second (tps) excluding connections establishing value + // The Transactions Per Second (tps) excluding connections establishing value Value *float32 `json:"value"` } // SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondIncludingConnectionsEstablishing defines model for SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondIncludingConnectionsEstablishing. type SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondIncludingConnectionsEstablishing struct { - // Transaction Per Second (tps) measure + // Transactions Per Second (tps) measure unit Unit *string `json:"unit,omitempty"` - // The Transaction Per Second (tps) including connections establishing value - Value *float32 `json:"value"` + // The Transactions Per Second (tps) including connections establishing value + Value *float32 `json:"value,omitempty"` +} + +// SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime defines model for SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime. +type SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime struct { + // The interval duration used to aggregate the transactions per second. + IntervalDuration *float32 `json:"intervalDuration,omitempty"` + + // The interval duration measure unit + IntervalDurationUnit *string `json:"intervalDurationUnit,omitempty"` + + // The Transactions Per Second (tps) values aggregated over unit of time + Values *[]float32 `json:"values,omitempty"` + + // The Transactions Per Second (tps) measures unit + ValuesUnit *string `json:"valuesUnit,omitempty"` +} + +// SGDbOpsStatusBenchmarkSampling defines model for SGDbOpsStatusBenchmarkSampling. +type SGDbOpsStatusBenchmarkSampling struct { + // The queries sampled. + Queries *[]SGDbOpsStatusBenchmarkSamplingQueriesItem `json:"queries,omitempty"` + + // The top queries sampled with the stats from pg_stat_statements. It is omitted if `omitTopQueriesInStatus` is set to `true`. + TopQueries *[]SGDbOpsStatusBenchmarkSamplingTopQueriesItem `json:"topQueries,omitempty"` +} + +// SGDbOpsStatusBenchmarkSamplingQueriesItem defines model for SGDbOpsStatusBenchmarkSamplingQueriesItem. +type SGDbOpsStatusBenchmarkSamplingQueriesItem struct { + // The query id of the representative statement calculated by Postgres + Id *string `json:"id,omitempty"` + + // A sampled SQL query + Query *string `json:"query,omitempty"` + + // The sampled query timestamp + Timestamp *string `json:"timestamp,omitempty"` +} + +// SGDbOpsStatusBenchmarkSamplingTopQueriesItem defines model for SGDbOpsStatusBenchmarkSamplingTopQueriesItem. +type SGDbOpsStatusBenchmarkSamplingTopQueriesItem struct { + // The query id of the representative statement calculated by Postgres + Id *string `json:"id,omitempty"` + + // Stats collected by the top queries query + Stats map[string]string `json:"stats,omitempty"` } // SGDbOpsStatusConditionsItem defines model for SGDbOpsStatusConditionsItem. 
@@ -809,6 +1212,9 @@ type SGDbOpsStatusMajorVersionUpgrade struct {
 // The instances that are pending to be restarted
 PendingToRestartInstances *[]string `json:"pendingToRestartInstances,omitempty"`
 
+ // The phase the operation is or was executing
+ Phase *string `json:"phase,omitempty"`
+
 // The primary instance when the operation started
 PrimaryInstance *string `json:"primaryInstance,omitempty"`
 
diff --git a/apis/stackgres/v1/sgdbops.yaml b/apis/stackgres/v1/sgdbops.yaml
index 2bcccfdad9..e115f9a773 100644
--- a/apis/stackgres/v1/sgdbops.yaml
+++ b/apis/stackgres/v1/sgdbops.yaml
@@ -35,519 +35,11 @@ components:
 type: string
 description: |
 NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- tolerations:
- description: |
- If specified, the pod's tolerations.
-
- See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core
- type: array
- items:
- description: |
- The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator
-
- See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core
- properties:
- effect:
- description: 'Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
-
-
-'
- type: string
- key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
- type: string
- operator:
- description: 'Operator represents a key''s relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
-
-
-'
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
- type: string
- type: object
- nodeAffinity:
- description: |
- Node affinity is a group of node affinity scheduling rules.
-
- See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#nodeaffinity-v1-core
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
- items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: 'Represents a key''s relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - - -' - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: 'Represents a key''s relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - - -' - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - weight - - preference - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: 'Represents a key''s relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - - -' - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: 'Represents a key''s relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - - -' - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: | - Pod affinity is a group of inter pod affinity scheduling rules. - - See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#podaffinity-v1-core - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - weight - - podAffinityTerm - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: | - Pod anti affinity is a group of inter pod anti affinity scheduling rules. - - See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#podantiaffinity-v1-core - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - weight - - podAffinityTerm - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - namespaceSelector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". 
The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object + tolerations: &tolerations {"description": "If specified, the pod's tolerations.\n\nSee https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core", "items": {"description": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", "properties": {"effect": {"description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", "type": "string"}, "key": {"description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", "type": "string"}, "operator": {"description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", "type": "string"}, "tolerationSeconds": {"description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", "format": "int64", "type": "integer"}, "value": {"description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", "type": "string"}}, "type": "object"}, "type": "array"} + nodeAffinity: &node-affinity {"description": "Node affinity is a group of node affinity scheduling rules.\n\nSee https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core", "properties": {"preferredDuringSchedulingIgnoredDuringExecution": {"description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", "items": {"description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. 
it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", "properties": {"preference": {"description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", "properties": {"matchExpressions": {"description": "A list of node selector requirements by node's labels.", "items": {"description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "The label key that the selector applies to.", "type": "string"}, "operator": {"description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", "type": "string"}, "values": {"description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchFields": {"description": "A list of node selector requirements by node's fields.", "items": {"description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "The label key that the selector applies to.", "type": "string"}, "operator": {"description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", "type": "string"}, "values": {"description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}}, "type": "object"}, "weight": {"description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", "format": "int32", "type": "integer"}}, "required": ["weight", "preference"], "type": "object"}, "type": "array"}, "requiredDuringSchedulingIgnoredDuringExecution": {"description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", "properties": {"nodeSelectorTerms": {"description": "Required. A list of node selector terms. The terms are ORed.", "items": {"description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", "properties": {"matchExpressions": {"description": "A list of node selector requirements by node's labels.", "items": {"description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "The label key that the selector applies to.", "type": "string"}, "operator": {"description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", "type": "string"}, "values": {"description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchFields": {"description": "A list of node selector requirements by node's fields.", "items": {"description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "The label key that the selector applies to.", "type": "string"}, "operator": {"description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", "type": "string"}, "values": {"description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}}, "type": "object"}, "type": "array"}}, "required": ["nodeSelectorTerms"], "type": "object"}}, "type": "object"} + priorityClassName: &priority-class-name {"description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "type": "string"} + podAffinity: &pod-affinity {"description": "Pod affinity is a group of inter pod affinity scheduling rules.\n\nSee https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core", "properties": {"preferredDuringSchedulingIgnoredDuringExecution": {"description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", "items": {"description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", "properties": {"podAffinityTerm": {"description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "properties": {"labelSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "matchLabelKeys": {"description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "mismatchLabelKeys": {"description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "namespaceSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "namespaces": {"description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "items": {"type": "string"}, "type": "array"}, "topologyKey": {"description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "type": "string"}}, "required": ["topologyKey"], "type": "object"}, "weight": {"description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", "format": "int32", "type": "integer"}}, "required": ["weight", "podAffinityTerm"], "type": "object"}, "type": "array"}, "requiredDuringSchedulingIgnoredDuringExecution": {"description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", "items": {"description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "properties": {"labelSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "matchLabelKeys": {"description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "mismatchLabelKeys": {"description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. 
The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "namespaceSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "namespaces": {"description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "items": {"type": "string"}, "type": "array"}, "topologyKey": {"description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "type": "string"}}, "required": ["topologyKey"], "type": "object"}, "type": "array"}}, "type": "object"} + podAntiAffinity: &pod-anti-affinity {"description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.\n\nSee https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core", "properties": {"preferredDuringSchedulingIgnoredDuringExecution": {"description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", "items": {"description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", "properties": {"podAffinityTerm": {"description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "properties": {"labelSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "matchLabelKeys": {"description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "mismatchLabelKeys": {"description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "namespaceSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "namespaces": {"description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "items": {"type": "string"}, "type": "array"}, "topologyKey": {"description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "type": "string"}}, "required": ["topologyKey"], "type": "object"}, "weight": {"description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", "format": "int32", "type": "integer"}}, "required": ["weight", "podAffinityTerm"], "type": "object"}, "type": "array"}, "requiredDuringSchedulingIgnoredDuringExecution": {"description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", "items": {"description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "properties": {"labelSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "matchLabelKeys": {"description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "mismatchLabelKeys": {"description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. 
The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", "items": {"type": "string"}, "type": "array"}, "namespaceSelector": {"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": {"matchExpressions": {"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", "items": {"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "properties": {"key": {"description": "key is the label key that the selector applies to.", "type": "string"}, "operator": {"description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "type": "string"}, "values": {"description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", "items": {"type": "string"}, "type": "array"}}, "required": ["key", "operator"], "type": "object"}, "type": "array"}, "matchLabels": {"additionalProperties": {"type": "string"}, "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object"}}, "type": "object"}, "namespaces": {"description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "items": {"type": "string"}, "type": "array"}, "topologyKey": {"description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "type": "string"}}, "required": ["topologyKey"], "type": "object"}, "type": "array"}}, "type": "object"} op: type: string description: | @@ -560,7 +52,6 @@ components: * `restart`: perform a restart of the cluster. * `minorVersionUpgrade`: perform a minor version upgrade of PostgreSQL. * `securityUpgrade`: perform a security upgrade of the cluster. - * `upgrade`: perform a operator API upgrade of the cluster runAt: type: string description: | @@ -580,7 +71,7 @@ components: description: | The maximum number of retries the operation is allowed to do after a failure. - A value of `0` (zero) means no retries are made. Can not be greater than `10`. Defaults to: `0`. + A value of `0` (zero) means no retries are made. Defaults to: `0`. benchmark: type: object description: | @@ -592,11 +83,124 @@ components: The type of benchmark that will be performed on the SGCluster. 
Available benchmarks are: * `pgbench`: run [pgbench](https://www.postgresql.org/docs/current/pgbench.html) on the specified SGCluster and report the results in the status. + * `sampling`: samples real queries and stores them in the SGDbOps status so they can be used by a `pgbench` benchmark using `replay` mode. + database: + type: string + description: | + When specified, indicates the database the benchmark will run on. + + If not specified, a target database with a random name will be created and removed after the benchmark completes. + credentials: + type: object + description: The credentials of the user that will be used by the benchmark. + required: ["username", "password"] + properties: + username: + type: object + description: | + The username that will be used by the benchmark. + + If not specified, the default superuser username (by default postgres) will be used. + required: ["name", "key"] + properties: + name: + type: string + description: | + The Secret name where the username is stored. + key: + type: string + description: | + The Secret key where the username is stored. + password: + type: object + description: | + The password that will be used by the benchmark. + + If not specified, the default superuser password will be used. + required: ["name", "key"] + properties: + name: + type: string + description: | + The Secret name where the password is stored. + key: + type: string + description: | + The Secret key where the password is stored. + sampling: + type: object + description: | + Configuration of sampling benchmark. + required: [targetDatabase, topQueriesCollectDuration, samplingDuration] + properties: + targetDatabase: + type: string + description: | + The target database to be sampled. By default `postgres`. + + The benchmark database will be used to store the sampled queries, but the user must specify a target database to be sampled in the `sampling` section. + topQueriesCollectDuration: + type: string + description: An ISO 8601 duration in the format `PnDTnHnMn.nS` that specifies how long to wait before selecting top queries in order to collect enough stats. + samplingDuration: + type: string + description: An ISO 8601 duration in the format `PnDTnHnMn.nS` that specifies how long the sampling of real queries (to be replayed later) will last. + mode: + type: string + description: | + The mode used to select the top queries for sampling: + + * `time`: The top queries will be selected among the slowest queries. + * `calls`: The top queries will be selected among the most frequently called queries. + * `custom`: The `customTopQueriesQuery` will be used to select top queries. + topQueriesFilter: + type: string + description: Regular expression for filtering representative statements when selecting top queries. Will be ignored if `mode` is set to `custom`. By default `^ *(with|select) `. See https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-POSIX-REGEXP + topQueriesPercentile: + type: integer + description: Percentile of queries to consider as part of the top queries. Will be ignored if `mode` is set to `custom`. By default `95`. + topQueriesMin: + type: integer + description: Minimum number of queries to consider as part of the top queries. By default `5`. + customTopQueriesQuery: + type: string + description: | + The query used to select top queries. Will be ignored if `mode` is not set to `custom`.
+ + The query must return at most 2 columns: + + * The first column returned by the query must hold the query identifier, also available in pg_stat_activity (column `query_id`) and pg_stat_statements (column `queryid`). + * The second column is optional and, if returned, must hold a JSON object containing only text keys and values that will be used to generate the stats. + + See also: + + * https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW + * https://www.postgresql.org/docs/current/pgstatstatements.html#PGSTATSTATEMENTS-PG-STAT-STATEMENTS + queries: + type: integer + description: Number of sampled queries to include in the result. By default `10`. + omitTopQueriesInStatus: + type: boolean + description: When `true`, the top queries stats are omitted from the SGDbOps status. By default `false`. + samplingMinInterval: + type: integer + description: Minimum number of microseconds the sampler will wait between taking samples. By default `10000` (10 milliseconds). pgbench: type: object description: | Configuration of [pgbench](https://www.postgresql.org/docs/current/pgbench.html) benchmark properties: + mode: + type: string + description: | + The pgbench benchmark type: + + * `tpcb-like`: The benchmark is inspired by the [TPC-B benchmark](https://www.tpc.org/TPC_Documents_Latest_Versions/TPC-B_v2.0.0.pdf). It is the default mode when `connectionType` is set to `primary-service`. + * `select-only`: Like `tpcb-like`, but using only SELECT commands. It is the default mode when `connectionType` is set to `replicas-service`. + * `custom`: will use the scripts in the `custom` section to initialize and run commands for the benchmark. + * `replay`: will replay the sampled queries of a sampling benchmark SGDbOps. If the `custom` section is specified, it will be used instead. Queries can be referenced by setting `custom.scripts.replay` to the index of the query in the sampling benchmark SGDbOps's status (index starts from 0). + + See also https://www.postgresql.org/docs/current/pgbench.html#TRANSACTIONS-AND-SCRIPTS databaseSize: type: string pattern: '^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$' @@ -609,7 +213,21 @@ components: usePreparedStatements: type: boolean description: | + **Deprecated**: this field is ignored; use `queryMode` instead. + Use extended query protocol with prepared statements. Defaults to: `false`. + queryMode: + type: string + description: | + Protocol to use for submitting queries to the server: + + * `simple`: use simple query protocol. + * `extended`: use extended query protocol. + * `prepared`: use extended query protocol with prepared statements. + + In the prepared mode, pgbench reuses the parse analysis result starting from the second query iteration, so pgbench runs faster than in other modes. + + The default is the `simple` query protocol. See also https://www.postgresql.org/docs/current/protocol.html concurrentClients: type: integer description: | @@ -618,6 +236,121 @@ components: type: integer description: | Number of worker threads within pgbench. Using more than one thread can be helpful on multi-CPU machines. Clients are distributed as evenly as possible among available threads. Default is `1`. + samplingRate: + type: number + description: | + Sampling rate, used when collecting data, to reduce the amount of collected data. If this option is given, only the specified fraction of transactions are collected. 1.0 means all transactions will be logged, 0.05 means only 5% of the transactions will be logged.
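For illustration, a minimal sketch of a sampling-benchmark SGDbOps manifest built from the fields documented above; the cluster name, database name, and durations are hypothetical, not taken from this patch:

apiVersion: stackgres.io/v1
kind: SGDbOps
metadata:
  name: sample-top-queries        # hypothetical name
spec:
  sgCluster: my-cluster           # hypothetical target SGCluster
  op: benchmark
  benchmark:
    type: sampling
    sampling:
      targetDatabase: app         # required: the database whose queries are sampled
      mode: time                  # select top queries among the slowest ones
      topQueriesCollectDuration: PT10M   # ISO 8601: collect stats for 10 minutes
      samplingDuration: PT30M            # ISO 8601: sample real queries for 30 minutes
      queries: 10                        # keep 10 sampled queries in the status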
+ foreignKeys: + type: boolean + description: | + Create foreign key constraints between the standard tables. (This option only takes effect if `custom.initialization` is not specified). + unloggedTables: + type: boolean + description: | + Create all tables as unlogged tables, rather than permanent tables. (This option only takes effect if `custom.initialization` is not specified). + partitionMethod: + type: string + description: | + Create a partitioned pgbench_accounts table with the specified method. Expected values are `range` or `hash`. This option requires that `partitions` is set to a non-zero value. If unspecified, default is `range`. (This option only takes effect if `custom.initialization` is not specified). + partitions: + type: integer + description: | + Create a partitioned pgbench_accounts table with the specified number of partitions of nearly equal size for the scaled number of accounts. Default is 0, meaning no partitioning. (This option only takes effect if `custom.initialization` is not specified). + initSteps: + type: string + description: | + Perform just a selected set of the normal initialization steps. init_steps specifies the initialization steps to be performed, using one character per step. Each step is invoked in the specified order. The default is dtgvp. The available steps are: + + * `d` (Drop): Drop any existing pgbench tables. + * `t` (create Tables): Create the tables used by the standard pgbench scenario, namely pgbench_accounts, pgbench_branches, pgbench_history, and pgbench_tellers. + * `g` or `G` (Generate data, client-side or server-side): Generate data and load it into the standard tables, replacing any data already present. + With `g` (client-side data generation), data is generated in pgbench client and then sent to the server. This uses the client/server bandwidth extensively through a COPY. pgbench uses the FREEZE option with version 14 or later of PostgreSQL to speed up subsequent VACUUM, unless partitions are enabled. Using g causes logging to print one message every 100,000 rows while generating data for the pgbench_accounts table. + With `G` (server-side data generation), only small queries are sent from the pgbench client and then data is actually generated in the server. No significant bandwidth is required for this variant, but the server will do more work. Using G causes logging not to print any progress message while generating data. + The default initialization behavior uses client-side data generation (equivalent to g). + * `v` (Vacuum): Invoke VACUUM on the standard tables. + * `p` (create Primary keys): Create primary key indexes on the standard tables. + * `f` (create Foreign keys): Create foreign key constraints between the standard tables. (Note that this step is not performed by default.) + fillfactor: + type: integer + description: | + Create the pgbench_accounts, pgbench_tellers and pgbench_branches tables with the given fillfactor. Default is 100. + noVacuum: + type: boolean + description: | + Perform no vacuuming during initialization. (This option suppresses the `v` initialization step, even if it was specified in `initSteps`.) + samplingSGDbOps: + type: string + description: The benchmark SGDbOps of type sampling that will be used to replay sampled queries. + custom: + type: object + description: This section allows configuring custom SQL for initialization and the scripts used by pgbench.
+ properties: + initialization: + type: object + description: | + The custom SQL for initialization that will be executed in place of pgbench default initialization. + + If not specified, the default pgbench initialization will be performed instead. + properties: &script-properties + script: + type: string + description: | + Raw SQL script to execute. This field is mutually exclusive with the `scriptFrom` field. + scriptFrom: + type: object + description: | + Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) that contains the SQL script to execute. This field is mutually exclusive with the `script` field. + + Fields `secretKeyRef` and `configMapKeyRef` are mutually exclusive, and one of them is required. + properties: + secretKeyRef: + type: object + description: | + A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) that contains the SQL script to execute. This field is mutually exclusive with the `configMapKeyRef` field. + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. Must be a valid secret key. + configMapKeyRef: + type: object + description: | + A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) reference that contains the SQL script to execute. This field is mutually exclusive with the `secretKeyRef` field. + properties: + name: + type: string + description: | + The name of the ConfigMap that contains the SQL script to execute. + key: + type: string + description: | + The key name within the ConfigMap that contains the SQL script to execute. + scripts: + type: array + description: The custom SQL scripts that will be executed by pgbench during the benchmark instead of the default pgbench scripts. + minItems: 1 + items: + type: object + description: A custom SQL script that will be executed by pgbench during the benchmark instead of the default pgbench scripts. + properties: + !!merge <<: *script-properties + builtin: + type: string + description: | + The name of the builtin script to use. See https://www.postgresql.org/docs/current/pgbench.html#PGBENCH-OPTION-BUILTIN + + When specified, fields `replay`, `script` and `scriptFrom` must not be set. + replay: + type: integer + description: | + The index of the query in the sampling benchmark SGDbOps's status (index starts from 0). + + When specified, fields `builtin`, `script` and `scriptFrom` must not be set. + weight: + type: integer + description: The weight of this custom SQL script. required: ["databaseSize", "duration"] connectionType: type: string @@ -637,14 +370,14 @@ components: description: | If true selects "full" vacuum, which can reclaim more space, but takes much longer and exclusively locks the table. This method also requires extra disk space, since it writes a new copy of the table and doesn't release the old copy - until the operation is complete. Usually this should only be used when a significant amount of space needs to be - reclaimed from within the table. Defaults to: `false`. + until the operation is complete. Usually this should only be used when a significant amount of space needs to be + reclaimed from within the table. Defaults to: `false`.
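Returning to the pgbench `mode`, `samplingSGDbOps` and `custom` fields documented above, a hedged sketch of a replay benchmark that references the sampling SGDbOps from the previous example; all names are hypothetical, and the ISO 8601 `duration` format is assumed to match the durations shown earlier:

apiVersion: stackgres.io/v1
kind: SGDbOps
metadata:
  name: replay-top-queries           # hypothetical name
spec:
  sgCluster: my-cluster              # hypothetical target SGCluster
  op: benchmark
  benchmark:
    type: pgbench
    pgbench:
      mode: replay
      samplingSGDbOps: sample-top-queries   # the sampling SGDbOps sketched above
      databaseSize: 1Gi
      duration: PT5M
      queryMode: prepared            # reuse parse analysis across iterations
      custom:
        scripts:
        - replay: 0                  # replay the first sampled query (index starts from 0)...
          weight: 9                  # ...nine times as often
        - replay: 1
          weight: 1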
freeze: type: boolean description: | If true selects aggressive "freezing" of tuples. Specifying FREEZE is equivalent to performing VACUUM with the - vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed - when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. + vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed + when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. analyze: type: boolean description: | @@ -653,11 +386,11 @@ components: type: boolean description: | Normally, VACUUM will skip pages based on the visibility map. Pages where all tuples are known to be frozen can always be - skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an - aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid - waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be - used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or - software issue causing database corruption. Defaults to: `false`. + skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an + aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid + waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be + used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or + software issue causing database corruption. Defaults to: `false`. databases: type: array description: | @@ -674,14 +407,14 @@ components: description: | If true selects "full" vacuum, which can reclaim more space, but takes much longer and exclusively locks the table. This method also requires extra disk space, since it writes a new copy of the table and doesn't release the old copy - until the operation is complete. Usually this should only be used when a significant amount of space needs to be - reclaimed from within the table. Defaults to: `false`. + until the operation is complete. Usually this should only be used when a significant amount of space needs to be + reclaimed from within the table. Defaults to: `false`. freeze: type: boolean description: | If true selects aggressive "freezing" of tuples. Specifying FREEZE is equivalent to performing VACUUM with the - vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed - when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. + vacuum_freeze_min_age and vacuum_freeze_table_age parameters set to zero. Aggressive freezing is always performed + when the table is rewritten, so this option is redundant when FULL is specified. Defaults to: `false`. analyze: type: boolean description: | @@ -690,11 +423,11 @@ components: type: boolean description: | Normally, VACUUM will skip pages based on the visibility map. Pages where all tuples are known to be frozen can always be - skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an - aggressive vacuum. 
Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid - waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be - used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or - software issue causing database corruption. Defaults to: `false`. + skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an + aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid + waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to be + used only when the contents of the visibility map are suspect, which should happen only if there is a hardware or + software issue causing database corruption. Defaults to: `false`. repack: type: object description: | @@ -760,6 +493,34 @@ components: type: string description: | The target postgres version that must have the same major version of the target SGCluster. + postgresExtensions: + type: array + description: | + A major version upgrade cannot be performed if a required extension is not present for the target major version of the upgrade. + In those cases you will have to provide the version of the extension that targets the new major version of postgres. + Beware that in some cases it is not possible to upgrade an extension alongside postgres. This is the case for PostGIS or timescaledb. + In such cases you will have to upgrade the extension before or after the major version upgrade. Please make sure you read the + documentation of each extension in order to understand whether it is possible to upgrade it during a major version upgrade of postgres. + items: + type: object + properties: + name: + type: string + description: The name of the extension to deploy. + publisher: + type: string + description: The id of the publisher of the extension to deploy. If not specified, `com.ongres` will be used by default. + default: com.ongres + version: + type: string + description: The version of the extension to deploy. If not specified, the version of the `stable` channel will be used by default, and if only one version is available, that one will be used. + repository: + type: string + description: | + The repository base URL from where to obtain the extension to deploy. + + **This section is filled by the operator.** + required: ["name"] sgPostgresConfig: type: string description: | @@ -771,8 +532,8 @@ components: When provided will indicate were the backups and WAL files will be stored. - The path should be different from the current `.spec.configurations.backupPath` value for the target `SGCluster` - in order to avoid mixing WAL files of two distinct major versions of postgres. + The path should be different from the current `.spec.configurations.backups[].path` value for the target `SGCluster` + in order to avoid mixing WAL files of two distinct major versions of postgres. link: type: boolean description: | @@ -782,15 +543,49 @@ components: description: | If true use efficient file cloning (also known as "reflinks" on some systems) instead of copying files to the new cluster. This can result in near-instantaneous copying of the data files, giving the speed advantages of `link` while leaving the old - cluster untouched.
This option is mutually exclusive with `link`. Defaults to: `false`. File cloning is only supported on some operating systems and file systems. If it is selected but not supported, the pg_upgrade - run will error. At present, it is supported on Linux (kernel 4.5 or later) with Btrfs and XFS (on file systems created with - reflink support), and on macOS with APFS. + run will error. At present, it is supported on Linux (kernel 4.5 or later) with Btrfs and XFS (on file systems created with + reflink support), and on macOS with APFS. check: type: boolean description: | If true does some checks to see if the cluster can perform a major version upgrade without changing any data. Defaults to: `false`. + toInstallPostgresExtensions: + type: array + description: | + The list of Postgres extensions to install. + + **This section is filled by the operator.** + items: + type: object + properties: + name: + type: string + description: The name of the extension to install. + publisher: + type: string + description: The id of the publisher of the extension to install. + version: + type: string + description: The version of the extension to install. + repository: + type: string + description: The repository base URL from where the extension will be installed. + postgresVersion: + type: string + description: The postgres major version of the extension to install. + build: + type: string + description: The build version of the extension to install. + extraMounts: + type: array + description: The extra mounts of the extension to install. + items: + type: string + description: The extra mount of the installed extension. + required: ["name", "publisher", "version", "repository", "postgresVersion"] restart: type: object description: | @@ -810,7 +605,7 @@ components: type: boolean description: | By default all Pods are restarted. Setting this option to `true` allow to restart only those Pods which - are in pending restart state as detected by the operation. Defaults to: `false`. + are in pending restart state as detected by the operation. Defaults to: `false`. minorVersionUpgrade: type: object description: | @@ -888,10 +683,42 @@ components: description: | The results of the benchmark properties: + sampling: + type: object + description: The results of the sampling benchmark + properties: + topQueries: + type: array + description: The top queries sampled with the stats from pg_stat_statements. It is omitted if `omitTopQueriesInStatus` is set to `true`. + items: + type: object + properties: + id: + type: string + description: The query id of the representative statement calculated by Postgres + stats: + type: object + description: Stats collected by the top queries query + additionalProperties: + type: string + queries: + type: array + description: The queries sampled.
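To ground the `majorVersionUpgrade` fields above, a hedged sketch of a major version upgrade SGDbOps; the cluster, versions, backup path, and extension pin are all hypothetical:

apiVersion: stackgres.io/v1
kind: SGDbOps
metadata:
  name: upgrade-to-pg17             # hypothetical name
spec:
  sgCluster: my-cluster             # hypothetical target SGCluster
  op: majorVersionUpgrade
  majorVersionUpgrade:
    postgresVersion: "17.2"         # hypothetical target version
    sgPostgresConfig: pg17-config   # an SGPostgresConfig matching the target major version
    backupPath: sgbackups.stackgres.io/my-namespace/my-cluster/17   # must differ from the current path
    link: true                      # hard-link data files instead of copying (mutually exclusive with clone)
    check: true                     # validate the upgrade without changing any data
    postgresExtensions:
    - name: pg_partman              # hypothetical extension pinned for the target major version
      version: "5.1.0"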
+ items: + type: object + properties: + id: + type: string + description: The query id of the representative statement calculated by Postgres + query: + type: string + description: A sampled SQL query + timestamp: + type: string + description: The sampled query timestamp pgbench: type: object - description: | - The results of the pgbench benchmark + description: The results of the pgbench benchmark properties: scaleFactor: type: number @@ -922,7 +749,7 @@ components: type: string nullable: false description: | - The latency measure unit represented in milliseconds + The latency measure unit standardDeviation: type: object description: | @@ -937,7 +764,7 @@ components: type: string nullable: false description: | - The latency measure unit represented in milliseconds + The latency measure unit transactionsPerSecond: type: object description: | @@ -946,33 +773,71 @@ components: includingConnectionsEstablishing: type: object description: | - Number of Transaction Per Second (tps) including connection establishing. + Number of Transactions Per Second (tps) including connection establishing. properties: value: type: number - nullable: true description: | - The Transaction Per Second (tps) including connections establishing value + The Transactions Per Second (tps) including connections establishing value unit: type: string - nullable: false description: | - Transaction Per Second (tps) measure + Transactions Per Second (tps) measure unit excludingConnectionsEstablishing: type: object description: | - Number of Transaction Per Second (tps) excluding connection establishing. + Number of Transactions Per Second (tps) excluding connection establishing. properties: value: type: number nullable: true description: | - The Transaction Per Second (tps) excluding connections establishing value + The Transactions Per Second (tps) excluding connections establishing value unit: type: string nullable: false description: | - Transaction Per Second (tps) measure + Transactions Per Second (tps) measure unit + overTime: + type: object + description: The Transactions Per Second (tps) values aggregated over unit of time + properties: + valuesUnit: + type: string + description: The Transactions Per Second (tps) measures unit + values: + type: array + description: The Transactions Per Second (tps) values aggregated over unit of time + items: + type: number + intervalDurationUnit: + type: string + description: The interval duration measure unit + intervalDuration: + type: number + description: The interval duration used to aggregate the transactions per second. 
+ statements: + type: array + description: Average per-statement latency (execution time from the perspective of the client) of each command after the benchmark finishes + items: + type: object + description: Average per-statement latency (execution time from the perspective of the client) of a command after the benchmark finishes + properties: + script: + type: integer + description: The script index (`0` if no custom scripts have been defined) + command: + type: string + description: The command + latency: + type: number + description: Average latency of the command + unit: + type: string + description: The average latency measure unit + hdrHistogram: + type: string + description: Compressed and base64 encoded HdrHistogram majorVersionUpgrade: type: object description: | @@ -1008,6 +873,10 @@ components: The instances that have been restarted items: type: string + phase: + type: string + description: | + The phase the operation is or was executing failure: type: string description: | diff --git a/apis/stackgres/v1/zz_generated.deepcopy.go b/apis/stackgres/v1/zz_generated.deepcopy.go index da94252106..cf527aca66 100644 --- a/apis/stackgres/v1/zz_generated.deepcopy.go +++ b/apis/stackgres/v1/zz_generated.deepcopy.go @@ -7980,11 +7980,26 @@ func (in *SGDbOpsSpecBenchmark) DeepCopyInto(out *SGDbOpsSpecBenchmark) { *out = new(string) **out = **in } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(SGDbOpsSpecBenchmarkCredentials) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } if in.Pgbench != nil { in, out := &in.Pgbench, &out.Pgbench *out = new(SGDbOpsSpecBenchmarkPgbench) (*in).DeepCopyInto(*out) } + if in.Sampling != nil { + in, out := &in.Sampling, &out.Sampling + *out = new(SGDbOpsSpecBenchmarkSampling) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmark. @@ -7997,6 +8012,53 @@ func (in *SGDbOpsSpecBenchmark) DeepCopy() *SGDbOpsSpecBenchmark { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkCredentials) DeepCopyInto(out *SGDbOpsSpecBenchmarkCredentials) { + *out = *in + out.Password = in.Password + out.Username = in.Username +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkCredentials. +func (in *SGDbOpsSpecBenchmarkCredentials) DeepCopy() *SGDbOpsSpecBenchmarkCredentials { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkCredentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkCredentialsPassword) DeepCopyInto(out *SGDbOpsSpecBenchmarkCredentialsPassword) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkCredentialsPassword. +func (in *SGDbOpsSpecBenchmarkCredentialsPassword) DeepCopy() *SGDbOpsSpecBenchmarkCredentialsPassword { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkCredentialsPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SGDbOpsSpecBenchmarkCredentialsUsername) DeepCopyInto(out *SGDbOpsSpecBenchmarkCredentialsUsername) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkCredentialsUsername. +func (in *SGDbOpsSpecBenchmarkCredentialsUsername) DeepCopy() *SGDbOpsSpecBenchmarkCredentialsUsername { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkCredentialsUsername) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SGDbOpsSpecBenchmarkPgbench) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbench) { *out = *in @@ -8005,11 +8067,71 @@ func (in *SGDbOpsSpecBenchmarkPgbench) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgb *out = new(int) **out = **in } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(SGDbOpsSpecBenchmarkPgbenchCustom) + (*in).DeepCopyInto(*out) + } + if in.Fillfactor != nil { + in, out := &in.Fillfactor, &out.Fillfactor + *out = new(int) + **out = **in + } + if in.ForeignKeys != nil { + in, out := &in.ForeignKeys, &out.ForeignKeys + *out = new(bool) + **out = **in + } + if in.InitSteps != nil { + in, out := &in.InitSteps, &out.InitSteps + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.NoVacuum != nil { + in, out := &in.NoVacuum, &out.NoVacuum + *out = new(bool) + **out = **in + } + if in.PartitionMethod != nil { + in, out := &in.PartitionMethod, &out.PartitionMethod + *out = new(string) + **out = **in + } + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = new(int) + **out = **in + } + if in.QueryMode != nil { + in, out := &in.QueryMode, &out.QueryMode + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float32) + **out = **in + } + if in.SamplingSGDbOps != nil { + in, out := &in.SamplingSGDbOps, &out.SamplingSGDbOps + *out = new(string) + **out = **in + } if in.Threads != nil { in, out := &in.Threads, &out.Threads *out = new(int) **out = **in } + if in.UnloggedTables != nil { + in, out := &in.UnloggedTables, &out.UnloggedTables + *out = new(bool) + **out = **in + } if in.UsePreparedStatements != nil { in, out := &in.UsePreparedStatements, &out.UsePreparedStatements *out = new(bool) @@ -8017,57 +8139,439 @@ func (in *SGDbOpsSpecBenchmarkPgbench) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgb } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbench. -func (in *SGDbOpsSpecBenchmarkPgbench) DeepCopy() *SGDbOpsSpecBenchmarkPgbench { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbench. +func (in *SGDbOpsSpecBenchmarkPgbench) DeepCopy() *SGDbOpsSpecBenchmarkPgbench { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbench) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SGDbOpsSpecBenchmarkPgbenchCustom) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustom) { + *out = *in + if in.Initialization != nil { + in, out := &in.Initialization, &out.Initialization + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomInitialization) + (*in).DeepCopyInto(*out) + } + if in.Scripts != nil { + in, out := &in.Scripts, &out.Scripts + *out = new([]SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem) + if **in != nil { + in, out := *in, *out + *out = make([]SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustom. +func (in *SGDbOpsSpecBenchmarkPgbenchCustom) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustom { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitialization) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomInitialization) { + *out = *in + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptFrom != nil { + in, out := &in.ScriptFrom, &out.ScriptFrom + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomInitialization. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitialization) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomInitialization { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomInitialization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom) { + *out = *in + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromConfigMapKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomInitializationScriptFromSecretKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem) { + *out = *in + if in.Builtin != nil { + in, out := &in.Builtin, &out.Builtin + *out = new(string) + **out = **in + } + if in.Replay != nil { + in, out := &in.Replay, &out.Replay + *out = new(int) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptFrom != nil { + in, out := &in.ScriptFrom, &out.ScriptFrom + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom) { + *out = *in + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromConfigMapKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef) DeepCopyInto(out *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef. +func (in *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef) DeepCopy() *SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkPgbenchCustomScriptsItemScriptFromSecretKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SGDbOpsSpecBenchmarkSampling) DeepCopyInto(out *SGDbOpsSpecBenchmarkSampling) { + *out = *in + if in.CustomTopQueriesQuery != nil { + in, out := &in.CustomTopQueriesQuery, &out.CustomTopQueriesQuery + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.OmitTopQueriesInStatus != nil { + in, out := &in.OmitTopQueriesInStatus, &out.OmitTopQueriesInStatus + *out = new(bool) + **out = **in + } + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = new(int) + **out = **in + } + if in.SamplingMinInterval != nil { + in, out := &in.SamplingMinInterval, &out.SamplingMinInterval + *out = new(int) + **out = **in + } + if in.TopQueriesFilter != nil { + in, out := &in.TopQueriesFilter, &out.TopQueriesFilter + *out = new(string) + **out = **in + } + if in.TopQueriesMin != nil { + in, out := &in.TopQueriesMin, &out.TopQueriesMin + *out = new(int) + **out = **in + } + if in.TopQueriesPercentile != nil { + in, out := &in.TopQueriesPercentile, &out.TopQueriesPercentile + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecBenchmarkSampling. +func (in *SGDbOpsSpecBenchmarkSampling) DeepCopy() *SGDbOpsSpecBenchmarkSampling { + if in == nil { + return nil + } + out := new(SGDbOpsSpecBenchmarkSampling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsSpecMajorVersionUpgrade) DeepCopyInto(out *SGDbOpsSpecMajorVersionUpgrade) { + *out = *in + if in.BackupPath != nil { + in, out := &in.BackupPath, &out.BackupPath + *out = new(string) + **out = **in + } + if in.Check != nil { + in, out := &in.Check, &out.Check + *out = new(bool) + **out = **in + } + if in.Clone != nil { + in, out := &in.Clone, &out.Clone + *out = new(bool) + **out = **in + } + if in.Link != nil { + in, out := &in.Link, &out.Link + *out = new(bool) + **out = **in + } + if in.PostgresExtensions != nil { + in, out := &in.PostgresExtensions, &out.PostgresExtensions + *out = new([]SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem) + if **in != nil { + in, out := *in, *out + *out = make([]SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + if in.PostgresVersion != nil { + in, out := &in.PostgresVersion, &out.PostgresVersion + *out = new(string) + **out = **in + } + if in.SgPostgresConfig != nil { + in, out := &in.SgPostgresConfig, &out.SgPostgresConfig + *out = new(string) + **out = **in + } + if in.ToInstallPostgresExtensions != nil { + in, out := &in.ToInstallPostgresExtensions, &out.ToInstallPostgresExtensions + *out = new([]SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem) + if **in != nil { + in, out := *in, *out + *out = make([]SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecMajorVersionUpgrade. +func (in *SGDbOpsSpecMajorVersionUpgrade) DeepCopy() *SGDbOpsSpecMajorVersionUpgrade { + if in == nil { + return nil + } + out := new(SGDbOpsSpecMajorVersionUpgrade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem) DeepCopyInto(out *SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem) { + *out = *in + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem. +func (in *SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem) DeepCopy() *SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem { if in == nil { return nil } - out := new(SGDbOpsSpecBenchmarkPgbench) + out := new(SGDbOpsSpecMajorVersionUpgradePostgresExtensionsItem) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SGDbOpsSpecMajorVersionUpgrade) DeepCopyInto(out *SGDbOpsSpecMajorVersionUpgrade) { +func (in *SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem) DeepCopyInto(out *SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem) { *out = *in - if in.BackupPath != nil { - in, out := &in.BackupPath, &out.BackupPath - *out = new(string) - **out = **in - } - if in.Check != nil { - in, out := &in.Check, &out.Check - *out = new(bool) - **out = **in - } - if in.Clone != nil { - in, out := &in.Clone, &out.Clone - *out = new(bool) - **out = **in - } - if in.Link != nil { - in, out := &in.Link, &out.Link - *out = new(bool) - **out = **in - } - if in.PostgresVersion != nil { - in, out := &in.PostgresVersion, &out.PostgresVersion + if in.Build != nil { + in, out := &in.Build, &out.Build *out = new(string) **out = **in } - if in.SgPostgresConfig != nil { - in, out := &in.SgPostgresConfig, &out.SgPostgresConfig - *out = new(string) - **out = **in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecMajorVersionUpgrade. -func (in *SGDbOpsSpecMajorVersionUpgrade) DeepCopy() *SGDbOpsSpecMajorVersionUpgrade { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem. 
+func (in *SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem) DeepCopy() *SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem { if in == nil { return nil } - out := new(SGDbOpsSpecMajorVersionUpgrade) + out := new(SGDbOpsSpecMajorVersionUpgradeToInstallPostgresExtensionsItem) in.DeepCopyInto(out) return out } @@ -8238,6 +8742,11 @@ func (in *SGDbOpsSpecScheduling) DeepCopyInto(out *SGDbOpsSpecScheduling) { *out = new(SGDbOpsSpecSchedulingPodAntiAffinity) (*in).DeepCopyInto(*out) } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations *out = new([]SGDbOpsSpecSchedulingTolerationsItem) @@ -8561,6 +9070,24 @@ func (in *SGDbOpsSpecSchedulingPodAffinityPreferredDuringSchedulingIgnoredDuring *out = new(SGDbOpsSpecSchedulingPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermLabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } if in.NamespaceSelector != nil { in, out := &in.NamespaceSelector, &out.NamespaceSelector *out = new(SGDbOpsSpecSchedulingPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermNamespaceSelector) @@ -8709,6 +9236,24 @@ func (in *SGDbOpsSpecSchedulingPodAffinityRequiredDuringSchedulingIgnoredDuringE *out = new(SGDbOpsSpecSchedulingPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemLabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } if in.NamespaceSelector != nil { in, out := &in.NamespaceSelector, &out.NamespaceSelector *out = new(SGDbOpsSpecSchedulingPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemNamespaceSelector) @@ -8910,6 +9455,24 @@ func (in *SGDbOpsSpecSchedulingPodAntiAffinityPreferredDuringSchedulingIgnoredDu *out = new(SGDbOpsSpecSchedulingPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermLabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } if in.NamespaceSelector != nil { in, out := &in.NamespaceSelector, &out.NamespaceSelector *out = new(SGDbOpsSpecSchedulingPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItemPodAffinityTermNamespaceSelector) @@ -9058,6 +9621,24 @@ func (in *SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDur 
*out = new(SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemLabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } if in.NamespaceSelector != nil { in, out := &in.NamespaceSelector, &out.NamespaceSelector *out = new(SGDbOpsSpecSchedulingPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionItemNamespaceSelector) @@ -9408,6 +9989,11 @@ func (in *SGDbOpsStatusBenchmark) DeepCopyInto(out *SGDbOpsStatusBenchmark) { *out = new(SGDbOpsStatusBenchmarkPgbench) (*in).DeepCopyInto(*out) } + if in.Sampling != nil { + in, out := &in.Sampling, &out.Sampling + *out = new(SGDbOpsStatusBenchmarkSampling) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmark. @@ -9423,6 +10009,11 @@ func (in *SGDbOpsStatusBenchmark) DeepCopy() *SGDbOpsStatusBenchmark { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SGDbOpsStatusBenchmarkPgbench) DeepCopyInto(out *SGDbOpsStatusBenchmarkPgbench) { *out = *in + if in.HdrHistogram != nil { + in, out := &in.HdrHistogram, &out.HdrHistogram + *out = new(string) + **out = **in + } if in.Latency != nil { in, out := &in.Latency, &out.Latency *out = new(SGDbOpsStatusBenchmarkPgbenchLatency) @@ -9433,6 +10024,17 @@ func (in *SGDbOpsStatusBenchmarkPgbench) DeepCopyInto(out *SGDbOpsStatusBenchmar *out = new(float32) **out = **in } + if in.Statements != nil { + in, out := &in.Statements, &out.Statements + *out = new([]SGDbOpsStatusBenchmarkPgbenchStatementsItem) + if **in != nil { + in, out := *in, *out + *out = make([]SGDbOpsStatusBenchmarkPgbenchStatementsItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } if in.TransactionsPerSecond != nil { in, out := &in.TransactionsPerSecond, &out.TransactionsPerSecond *out = new(SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond) @@ -9530,6 +10132,41 @@ func (in *SGDbOpsStatusBenchmarkPgbenchLatencyStandardDeviation) DeepCopy() *SGD return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsStatusBenchmarkPgbenchStatementsItem) DeepCopyInto(out *SGDbOpsStatusBenchmarkPgbenchStatementsItem) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = new(string) + **out = **in + } + if in.Latency != nil { + in, out := &in.Latency, &out.Latency + *out = new(float32) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(int) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmarkPgbenchStatementsItem. 
+func (in *SGDbOpsStatusBenchmarkPgbenchStatementsItem) DeepCopy() *SGDbOpsStatusBenchmarkPgbenchStatementsItem { + if in == nil { + return nil + } + out := new(SGDbOpsStatusBenchmarkPgbenchStatementsItem) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond) DeepCopyInto(out *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond) { *out = *in @@ -9543,6 +10180,11 @@ func (in *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond) DeepCopyInto(out * *out = new(SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondIncludingConnectionsEstablishing) (*in).DeepCopyInto(*out) } + if in.OverTime != nil { + in, out := &in.OverTime, &out.OverTime + *out = new(SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecond. @@ -9605,6 +10247,139 @@ func (in *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondIncludingConnections return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime) DeepCopyInto(out *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime) { + *out = *in + if in.IntervalDuration != nil { + in, out := &in.IntervalDuration, &out.IntervalDuration + *out = new(float32) + **out = **in + } + if in.IntervalDurationUnit != nil { + in, out := &in.IntervalDurationUnit, &out.IntervalDurationUnit + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = new([]float32) + if **in != nil { + in, out := *in, *out + *out = make([]float32, len(*in)) + copy(*out, *in) + } + } + if in.ValuesUnit != nil { + in, out := &in.ValuesUnit, &out.ValuesUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime. +func (in *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime) DeepCopy() *SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime { + if in == nil { + return nil + } + out := new(SGDbOpsStatusBenchmarkPgbenchTransactionsPerSecondOverTime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsStatusBenchmarkSampling) DeepCopyInto(out *SGDbOpsStatusBenchmarkSampling) { + *out = *in + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = new([]SGDbOpsStatusBenchmarkSamplingQueriesItem) + if **in != nil { + in, out := *in, *out + *out = make([]SGDbOpsStatusBenchmarkSamplingQueriesItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + if in.TopQueries != nil { + in, out := &in.TopQueries, &out.TopQueries + *out = new([]SGDbOpsStatusBenchmarkSamplingTopQueriesItem) + if **in != nil { + in, out := *in, *out + *out = make([]SGDbOpsStatusBenchmarkSamplingTopQueriesItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmarkSampling. 
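Editorial note on the generated copy semantics above: every pointer-typed field, including the unusual pointer-to-slice fields such as `Queries *[]SGDbOpsStatusBenchmarkSamplingQueriesItem`, is re-allocated rather than aliased, so a deep copy can be mutated without touching the receiver. A minimal sketch, assuming only the generated types from this patch; the query literals are purely illustrative:

```go
package main

import (
	"fmt"

	stackgresv1 "github.com/vshn/appcat/v4/apis/stackgres/v1"
)

func main() {
	query := "SELECT 1"
	orig := &stackgresv1.SGDbOpsStatusBenchmarkSampling{
		Queries: &[]stackgresv1.SGDbOpsStatusBenchmarkSamplingQueriesItem{
			{Query: &query},
		},
	}

	// DeepCopy re-allocates every pointer field, so mutating the copy
	// leaves the original sampling status untouched.
	cp := orig.DeepCopy()
	*(*cp.Queries)[0].Query = "SELECT 2"

	fmt.Println(*(*orig.Queries)[0].Query) // prints "SELECT 1"
}
```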
+func (in *SGDbOpsStatusBenchmarkSampling) DeepCopy() *SGDbOpsStatusBenchmarkSampling { + if in == nil { + return nil + } + out := new(SGDbOpsStatusBenchmarkSampling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsStatusBenchmarkSamplingQueriesItem) DeepCopyInto(out *SGDbOpsStatusBenchmarkSamplingQueriesItem) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmarkSamplingQueriesItem. +func (in *SGDbOpsStatusBenchmarkSamplingQueriesItem) DeepCopy() *SGDbOpsStatusBenchmarkSamplingQueriesItem { + if in == nil { + return nil + } + out := new(SGDbOpsStatusBenchmarkSamplingQueriesItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGDbOpsStatusBenchmarkSamplingTopQueriesItem) DeepCopyInto(out *SGDbOpsStatusBenchmarkSamplingTopQueriesItem) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.Stats != nil { + in, out := &in.Stats, &out.Stats + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGDbOpsStatusBenchmarkSamplingTopQueriesItem. +func (in *SGDbOpsStatusBenchmarkSamplingTopQueriesItem) DeepCopy() *SGDbOpsStatusBenchmarkSamplingTopQueriesItem { + if in == nil { + return nil + } + out := new(SGDbOpsStatusBenchmarkSamplingTopQueriesItem) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SGDbOpsStatusConditionsItem) DeepCopyInto(out *SGDbOpsStatusConditionsItem) { *out = *in @@ -9671,6 +10446,11 @@ func (in *SGDbOpsStatusMajorVersionUpgrade) DeepCopyInto(out *SGDbOpsStatusMajor copy(*out, *in) } } + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } if in.PrimaryInstance != nil { in, out := &in.PrimaryInstance, &out.PrimaryInstance *out = new(string) diff --git a/apis/stackgres/v1beta1/sgobjectstorage.gen.go b/apis/stackgres/v1beta1/sgobjectstorage.gen.go index ea160e6fb8..099d41702e 100644 --- a/apis/stackgres/v1beta1/sgobjectstorage.gen.go +++ b/apis/stackgres/v1beta1/sgobjectstorage.gen.go @@ -37,7 +37,7 @@ type SGObjectStorageSpecAzureBlob struct { // SGObjectStorageSpecAzureBlobAzureCredentials defines model for SGObjectStorageSpecAzureBlobAzureCredentials. type SGObjectStorageSpecAzureBlobAzureCredentials struct { - // Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `azureCredentials`. . Note that you may use the same or different Secrets for the `storageAccount` and the `accessKey`. In the former case, the `keys` that identify each must be, obviously, different. 
+ // Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `azureCredentials`. . Note that you may use the same or different Secrets for the `storageAccount` and the `accessKey`. In the former case, the `keys` that identify each must be, obviously, different. SecretKeySelectors *SGObjectStorageSpecAzureBlobAzureCredentialsSecretKeySelectors `json:"secretKeySelectors,omitempty"` } @@ -84,7 +84,7 @@ type SGObjectStorageSpecGcsGcpCredentials struct { // This is useful when running StackGres inside a GKE cluster using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). FetchCredentialsFromMetadataService *bool `json:"fetchCredentialsFromMetadataService,omitempty"` - // A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core) to reference the Secrets that contain the information about the Service Account to access GCS. + // A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) to reference the Secrets that contain the information about the Service Account to access GCS. SecretKeySelectors *SGObjectStorageSpecGcsGcpCredentialsSecretKeySelectors `json:"secretKeySelectors,omitempty"` } @@ -120,7 +120,7 @@ type SGObjectStorageSpecS3 struct { // SGObjectStorageSpecS3AwsCredentials defines model for SGObjectStorageSpecS3AwsCredentials. type SGObjectStorageSpecS3AwsCredentials struct { - // Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secrets that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. + // Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secrets that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. SecretKeySelectors SGObjectStorageSpecS3AwsCredentialsSecretKeySelectors `json:"secretKeySelectors"` } @@ -177,7 +177,7 @@ type SGObjectStorageSpecS3Compatible struct { // SGObjectStorageSpecS3CompatibleAwsCredentials defines model for SGObjectStorageSpecS3CompatibleAwsCredentials. type SGObjectStorageSpecS3CompatibleAwsCredentials struct { - // Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. + // Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. 
In the former case, the `keys` that identify each must be, obviously, different. SecretKeySelectors SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectors `json:"secretKeySelectors"` } @@ -186,6 +186,9 @@ type SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectors struct { // AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `AKIAIOSFODNN7EXAMPLE`. AccessKeyId SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsAccessKeyId `json:"accessKeyId"` + // CA Certificate file to be used when connecting to the S3 Compatible Service. + CaCertificate *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate `json:"caCertificate,omitempty"` + // AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. SecretAccessKey SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey `json:"secretAccessKey"` } @@ -199,6 +202,15 @@ type SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsAccessKeyId Name string `json:"name"` } +// SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate defines model for SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate. +type SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate struct { + // The key of the secret to select from. Must be a valid secret key. + Key string `json:"key"` + + // Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + Name string `json:"name"` +} + // SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey defines model for SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey. type SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey struct { // The key of the secret to select from. Must be a valid secret key. diff --git a/apis/stackgres/v1beta1/sgobjectstorage.yaml b/apis/stackgres/v1beta1/sgobjectstorage.yaml index 52275e66a4..e1eb6d06b9 100644 --- a/apis/stackgres/v1beta1/sgobjectstorage.yaml +++ b/apis/stackgres/v1beta1/sgobjectstorage.yaml @@ -51,7 +51,7 @@ components: secretKeySelectors: type: object description: | - Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secrets that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. + Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secrets that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. properties: accessKeyId: type: object @@ -120,7 +120,7 @@ components: secretKeySelectors: type: object description: | - Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `awsCredentials`. 
Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. + Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. properties: accessKeyId: type: object @@ -150,6 +150,20 @@ components: description: | Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). required: ["key", "name"] + caCertificate: + type: object + description: | + CA Certificate file to be used when connecting to the S3 Compatible Service. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: ["key", "name"] required: ["accessKeyId", "secretAccessKey"] required: ["secretKeySelectors"] required: ["bucket", "awsCredentials"] @@ -177,7 +191,7 @@ components: secretKeySelectors: type: object description: | - A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core) to reference the Secrets that contain the information about the Service Account to access GCS. + A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) to reference the Secrets that contain the information about the Service Account to access GCS. properties: serviceAccountJSON: type: object @@ -213,7 +227,7 @@ components: secretKeySelectors: type: object description: | - Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `azureCredentials`. . Note that you may use the same or different Secrets for the `storageAccount` and the `accessKey`. In the former case, the `keys` that identify each must be, obviously, different. + Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `azureCredentials`. . Note that you may use the same or different Secrets for the `storageAccount` and the `accessKey`. In the former case, the `keys` that identify each must be, obviously, different. properties: storageAccount: type: object diff --git a/apis/stackgres/v1beta1/sgobjectstorage_crd.yaml b/apis/stackgres/v1beta1/sgobjectstorage_crd.yaml index a2bd2d522d..639c4aa362 100644 --- a/apis/stackgres/v1beta1/sgobjectstorage_crd.yaml +++ b/apis/stackgres/v1beta1/sgobjectstorage_crd.yaml @@ -13,259 +13,273 @@ spec: shortNames: - sgobjs versions: - - name: v1beta1 - served: true - storage: true - additionalPrinterColumns: - - name: type - type: string - jsonPath: .spec.type - schema: - openAPIV3Schema: - type: object - required: ["metadata", "spec"] - properties: - metadata: - type: object - properties: - name: - type: string - description: | - Name of the Object Storage configuration. 
- The name must be unique across all object storage configurations in the same namespace. - spec: - type: object - description: | - Object Storage configuration - properties: - type: - type: string - enum: ["s3", "s3Compatible", "gcs", "azureBlob"] - description: | - Determine the type of object storage used for storing the base backups and WAL segments. - Possible values: - * `s3`: Amazon Web Services S3 (Simple Storage Service). - * `s3Compatible`: non-AWS services that implement a compatibility API with AWS S3. - * `gcs`: Google Cloud Storage. - * `azureBlob`: Microsoft Azure Blob Storage. - s3: - type: object - description: | - Amazon Web Services S3 configuration. - properties: - bucket: - type: string - pattern: '^((s3|https?)://)?[^/]+(/[^/]*)*$' - description: | - AWS S3 bucket name. - region: - type: string - description: | - The AWS S3 region. The Region may be detected using s3:GetBucketLocation, but if you wish to avoid giving permissions to this API call or forbid it from the applicable IAM policy, you must then specify this property. - storageClass: - type: string - description: | - The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) to use for the backup object storage. By default, the `STANDARD` storage class is used. Other supported values include `STANDARD_IA` for Infrequent Access and `REDUCED_REDUNDANCY`. - awsCredentials: - type: object - description: | - The credentials to access AWS S3 for writing and reading. - properties: - secretKeySelectors: - type: object - description: | - Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secrets that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. - properties: - accessKeyId: - type: object - description: | - AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `AKIAIOSFODNN7EXAMPLE`. - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: ["key", "name"] - secretAccessKey: - type: object - description: | - AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: ["key", "name"] - required: ["accessKeyId", "secretAccessKey"] - required: ["secretKeySelectors"] - required: ["bucket", "awsCredentials"] - s3Compatible: - type: object - description: "AWS S3-Compatible API configuration" - properties: - bucket: - type: string - pattern: '^((s3|https?)://)?[^/]+(/[^/]*)*$' - description: | - Bucket name. - enablePathStyleAddressing: - type: boolean - description: | - Enable path-style addressing (i.e. 
`http://s3.amazonaws.com/BUCKET/KEY`) when connecting to an S3-compatible service that lacks support for sub-domain style bucket URLs (i.e. `http://BUCKET.s3.amazonaws.com/KEY`). - - Defaults to false. - endpoint: - type: string - description: | - Overrides the default url to connect to an S3-compatible service. - For example: `http://s3-like-service:9000`. - region: - type: string - description: | - The AWS S3 region. The Region may be detected using s3:GetBucketLocation, but if you wish to avoid giving permissions to this API call or forbid it from the applicable IAM policy, you must then specify this property. - storageClass: - type: string - description: | - The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) to use for the backup object storage. By default, the `STANDARD` storage class is used. Other supported values include `STANDARD_IA` for Infrequent Access and `REDUCED_REDUNDANCY`. - awsCredentials: - type: object - description: | - The credentials to access AWS S3 for writing and reading. - properties: - secretKeySelectors: - type: object - description: | - Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. - properties: - accessKeyId: - type: object - description: | - AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `AKIAIOSFODNN7EXAMPLE`. - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: ["key", "name"] - secretAccessKey: - type: object - description: | - AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: ["key", "name"] - required: ["accessKeyId", "secretAccessKey"] - required: ["secretKeySelectors"] - required: ["bucket", "awsCredentials"] - gcs: - type: object - description: | - Google Cloud Storage configuration. - properties: - bucket: - type: string - pattern: "^(gs://)?[^/]+(/[^/]*)*$" - description: | - GCS bucket name. - gcpCredentials: - type: object - description: | - The credentials to access GCS for writing and reading. - properties: - fetchCredentialsFromMetadataService: - type: boolean - description: | - If true, the credentials will be fetched from the GCE/GKE metadata service and the field `secretKeySelectors` have to be set to null or omitted. - - This is useful when running StackGres inside a GKE cluster using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). 
- secretKeySelectors: - type: object - description: | - A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core) to reference the Secrets that contain the information about the Service Account to access GCS. - properties: - serviceAccountJSON: - type: object - description: | - A service account key from GCP. In JSON format, as downloaded from the GCP Console. - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: [ "key", "name" ] - required: [ "serviceAccountJSON" ] - required: [ "bucket", "gcpCredentials" ] - azureBlob: - type: object - description: | - Azure Blob Storage configuration. - properties: - bucket: - type: string - pattern: "^(azure://)?[^/]+(/[^/]*)*$" - description: | - Azure Blob Storage bucket name. - azureCredentials: - type: object - description: | - The credentials to access Azure Blob Storage for writing and reading. - properties: - secretKeySelectors: - type: object - description: | - Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `azureCredentials`. . Note that you may use the same or different Secrets for the `storageAccount` and the `accessKey`. In the former case, the `keys` that identify each must be, obviously, different. - properties: - storageAccount: - type: object - description: | - The [Storage Account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview?toc=/azure/storage/blobs/toc.json) that contains the Blob bucket to be used. - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: [ "key", "name" ] - accessKey: - type: object - description: | - The [storage account access key](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal). - properties: - key: - type: string - description: | - The key of the secret to select from. Must be a valid secret key. - name: - type: string - description: | - Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). - required: [ "key", "name" ] - required: [ "storageAccount", "accessKey" ] - required: [ "bucket", "azureCredentials" ] - required: [ "type" ] + - name: v1beta1 + served: true + storage: true + additionalPrinterColumns: + - name: type + type: string + jsonPath: .spec.type + schema: + openAPIV3Schema: + type: object + required: ["metadata", "spec"] + properties: + metadata: + type: object + properties: + name: + type: string + description: | + Name of the Object Storage configuration. + The name must be unique across all object storage configurations in the same namespace. + spec: + type: object + description: | + Object Storage configuration + properties: + type: + type: string + enum: ["s3", "s3Compatible", "gcs", "azureBlob"] + description: | + Determine the type of object storage used for storing the base backups and WAL segments. 
+ Possible values: + * `s3`: Amazon Web Services S3 (Simple Storage Service). + * `s3Compatible`: non-AWS services that implement a compatibility API with AWS S3. + * `gcs`: Google Cloud Storage. + * `azureBlob`: Microsoft Azure Blob Storage. + s3: + type: object + description: | + Amazon Web Services S3 configuration. + properties: + bucket: + type: string + pattern: '^((s3|https?)://)?[^/]+(/[^/]*)*$' + description: | + AWS S3 bucket name. + region: + type: string + description: | + The AWS S3 region. The Region may be detected using s3:GetBucketLocation, but if you wish to avoid giving permissions to this API call or forbid it from the applicable IAM policy, you must then specify this property. + storageClass: + type: string + description: | + The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) to use for the backup object storage. By default, the `STANDARD` storage class is used. Other supported values include `STANDARD_IA` for Infrequent Access and `REDUCED_REDUNDANCY`. + awsCredentials: + type: object + description: | + The credentials to access AWS S3 for writing and reading. + properties: + secretKeySelectors: + type: object + description: | + Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secrets that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. + properties: + accessKeyId: + type: object + description: | + AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `AKIAIOSFODNN7EXAMPLE`. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: ["key", "name"] + secretAccessKey: + type: object + description: | + AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: ["key", "name"] + required: ["accessKeyId", "secretAccessKey"] + required: ["secretKeySelectors"] + required: ["bucket", "awsCredentials"] + s3Compatible: + type: object + description: "AWS S3-Compatible API configuration" + properties: + bucket: + type: string + pattern: '^((s3|https?)://)?[^/]+(/[^/]*)*$' + description: | + Bucket name. + enablePathStyleAddressing: + type: boolean + description: | + Enable path-style addressing (i.e. `http://s3.amazonaws.com/BUCKET/KEY`) when connecting to an S3-compatible service that lacks support for sub-domain style bucket URLs (i.e. `http://BUCKET.s3.amazonaws.com/KEY`). + + Defaults to false. + endpoint: + type: string + description: | + Overrides the default url to connect to an S3-compatible service. + For example: `http://s3-like-service:9000`. + region: + type: string + description: | + The AWS S3 region. 
The Region may be detected using s3:GetBucketLocation, but if you wish to avoid giving permissions to this API call or forbid it from the applicable IAM policy, you must then specify this property. + storageClass: + type: string + description: | + The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) to use for the backup object storage. By default, the `STANDARD` storage class is used. Other supported values include `STANDARD_IA` for Infrequent Access and `REDUCED_REDUNDANCY`. + awsCredentials: + type: object + description: | + The credentials to access AWS S3 for writing and reading. + properties: + secretKeySelectors: + type: object + description: | + Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `awsCredentials`. Note that you may use the same or different Secrets for the `accessKeyId` and the `secretAccessKey`. In the former case, the `keys` that identify each must be, obviously, different. + properties: + accessKeyId: + type: object + description: | + AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `AKIAIOSFODNN7EXAMPLE`. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: ["key", "name"] + secretAccessKey: + type: object + description: | + AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: ["key", "name"] + caCertificate: + type: object + description: | + CA Certificate file to be used when connecting to the S3 Compatible Service. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: ["key", "name"] + required: ["accessKeyId", "secretAccessKey"] + required: ["secretKeySelectors"] + required: ["bucket", "awsCredentials"] + gcs: + type: object + description: | + Google Cloud Storage configuration. + properties: + bucket: + type: string + pattern: "^(gs://)?[^/]+(/[^/]*)*$" + description: | + GCS bucket name. + gcpCredentials: + type: object + description: | + The credentials to access GCS for writing and reading. + properties: + fetchCredentialsFromMetadataService: + type: boolean + description: | + If true, the credentials will be fetched from the GCE/GKE metadata service and the field `secretKeySelectors` have to be set to null or omitted. + + This is useful when running StackGres inside a GKE cluster using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). 
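For reference, the new optional `caCertificate` selector added to the `s3Compatible` credentials above is also exposed through the generated Go types in this patch. A hedged sketch of populating it; the Secret names and keys (`s3-credentials`, `ca.crt`, `s3-ca`) are hypothetical:

```go
package main

import (
	sgv1beta1 "github.com/vshn/appcat/v4/apis/stackgres/v1beta1"
)

func main() {
	selectors := sgv1beta1.SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectors{
		AccessKeyId: sgv1beta1.SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsAccessKeyId{
			Key:  "AWS_ACCESS_KEY_ID",
			Name: "s3-credentials",
		},
		SecretAccessKey: sgv1beta1.SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey{
			Key:  "AWS_SECRET_ACCESS_KEY",
			Name: "s3-credentials",
		},
		// New in this patch: an optional CA bundle for S3-compatible
		// endpoints that serve a self-signed or private certificate.
		CaCertificate: &sgv1beta1.SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate{
			Key:  "ca.crt",
			Name: "s3-ca",
		},
	}
	_ = selectors
}
```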
+ secretKeySelectors: + type: object + description: | + A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) to reference the Secrets that contain the information about the Service Account to access GCS. + properties: + serviceAccountJSON: + type: object + description: | + A service account key from GCP. In JSON format, as downloaded from the GCP Console. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: [ "key", "name" ] + required: [ "serviceAccountJSON" ] + required: [ "bucket", "gcpCredentials" ] + azureBlob: + type: object + description: | + Azure Blob Storage configuration. + properties: + bucket: + type: string + pattern: "^(azure://)?[^/]+(/[^/]*)*$" + description: | + Azure Blob Storage bucket name. + azureCredentials: + type: object + description: | + The credentials to access Azure Blob Storage for writing and reading. + properties: + secretKeySelectors: + type: object + description: | + Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) to reference the Secret(s) that contain the information about the `azureCredentials`. . Note that you may use the same or different Secrets for the `storageAccount` and the `accessKey`. In the former case, the `keys` that identify each must be, obviously, different. + properties: + storageAccount: + type: object + description: | + The [Storage Account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview?toc=/azure/storage/blobs/toc.json) that contains the Blob bucket to be used. + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: [ "key", "name" ] + accessKey: + type: object + description: | + The [storage account access key](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal). + properties: + key: + type: string + description: | + The key of the secret to select from. Must be a valid secret key. + name: + type: string + description: | + Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + required: [ "key", "name" ] + required: [ "storageAccount", "accessKey" ] + required: [ "bucket", "azureCredentials" ] + required: [ "type" ] diff --git a/apis/stackgres/v1beta1/zz_generated.deepcopy.go b/apis/stackgres/v1beta1/zz_generated.deepcopy.go index ab8bc88f74..df66672ee1 100644 --- a/apis/stackgres/v1beta1/zz_generated.deepcopy.go +++ b/apis/stackgres/v1beta1/zz_generated.deepcopy.go @@ -348,7 +348,7 @@ func (in *SGObjectStorageSpecS3AwsCredentialsSecretKeySelectorsSecretAccessKey) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SGObjectStorageSpecS3Compatible) DeepCopyInto(out *SGObjectStorageSpecS3Compatible) { *out = *in - out.AwsCredentials = in.AwsCredentials + in.AwsCredentials.DeepCopyInto(&out.AwsCredentials) if in.EnablePathStyleAddressing != nil { in, out := &in.EnablePathStyleAddressing, &out.EnablePathStyleAddressing *out = new(bool) @@ -384,7 +384,7 @@ func (in *SGObjectStorageSpecS3Compatible) DeepCopy() *SGObjectStorageSpecS3Comp // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SGObjectStorageSpecS3CompatibleAwsCredentials) DeepCopyInto(out *SGObjectStorageSpecS3CompatibleAwsCredentials) { *out = *in - out.SecretKeySelectors = in.SecretKeySelectors + in.SecretKeySelectors.DeepCopyInto(&out.SecretKeySelectors) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGObjectStorageSpecS3CompatibleAwsCredentials. @@ -401,6 +401,11 @@ func (in *SGObjectStorageSpecS3CompatibleAwsCredentials) DeepCopy() *SGObjectSto func (in *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectors) DeepCopyInto(out *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectors) { *out = *in out.AccessKeyId = in.AccessKeyId + if in.CaCertificate != nil { + in, out := &in.CaCertificate, &out.CaCertificate + *out = new(SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate) + **out = **in + } out.SecretAccessKey = in.SecretAccessKey } @@ -429,6 +434,21 @@ func (in *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsAccessK return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate) DeepCopyInto(out *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate. +func (in *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate) DeepCopy() *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate { + if in == nil { + return nil + } + out := new(SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsCaCertificate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey) DeepCopyInto(out *SGObjectStorageSpecS3CompatibleAwsCredentialsSecretKeySelectorsSecretAccessKey) { *out = *in diff --git a/apis/vshn/v1/dbaas_vshn_postgresql.go b/apis/vshn/v1/dbaas_vshn_postgresql.go index e573b87058..7de10c66e3 100644 --- a/apis/vshn/v1/dbaas_vshn_postgresql.go +++ b/apis/vshn/v1/dbaas_vshn_postgresql.go @@ -242,6 +242,13 @@ type VSHNPostgreSQLTLS struct { type VSHNPostgreSQLStatus struct { // InstanceNamespace contains the name of the namespace where the instance resides InstanceNamespace string `json:"instanceNamespace,omitempty"` + + // CurrentVersion contains the current version of PostgreSQL. + CurrentVersion string `json:"currentVersion,omitempty"` + + // PreviousVersion contains the previous version of PostgreSQL. + PreviousVersion string `json:"previousVersion,omitempty"` + // PostgreSQLConditions contains the status conditions of the backing object. 
PostgreSQLConditions []Condition `json:"postgresqlConditions,omitempty"` NamespaceConditions []Condition `json:"namespaceConditions,omitempty"` diff --git a/cmd/maintenance.go b/cmd/maintenance.go index 57323700db..648d5b05ac 100644 --- a/cmd/maintenance.go +++ b/cmd/maintenance.go @@ -3,6 +3,7 @@ package cmd import ( "context" "fmt" + "github.com/vshn/appcat/v4/pkg/auth/stackgres" "net/http" "time" @@ -10,8 +11,8 @@ import ( "github.com/spf13/viper" "github.com/thediveo/enumflag/v2" "github.com/vshn/appcat/v4/pkg" + "github.com/vshn/appcat/v4/pkg/auth" "github.com/vshn/appcat/v4/pkg/maintenance" - "github.com/vshn/appcat/v4/pkg/maintenance/auth" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -86,11 +87,24 @@ func (c *controller) runMaintenance(cmd *cobra.Command, _ []string) error { if sgNamespace == "" { return fmt.Errorf("missing environment variable: %s", "SG_NAMESPACE") } + apiPassword := viper.GetString("API_PASSWORD") + if apiPassword == "" { + return fmt.Errorf("missing environment variable: %s", "API_PASSWORD") + } + + apiUserName := viper.GetString("API_USERNAME") + if apiUserName == "" { + return fmt.Errorf("missing environment variable: %s", "API_USERNAME") + } + sClient, err := stackgres.New(apiUserName, apiPassword, sgNamespace) + if err != nil { + return err + } m = &maintenance.PostgreSQL{ - Client: kubeClient, - SgURL: "https://stackgres-restapi." + sgNamespace + ".svc", - MaintTimeout: time.Hour, + Client: kubeClient, + StackgresClient: sClient, + MaintTimeout: time.Hour, } case redis: m = maintenance.NewRedis(kubeClient, getHTTPClient()) diff --git a/crds/vshn.appcat.vshn.io_vshnpostgresqls.yaml b/crds/vshn.appcat.vshn.io_vshnpostgresqls.yaml index 78b0ff2a4e..b1afcce568 100644 --- a/crds/vshn.appcat.vshn.io_vshnpostgresqls.yaml +++ b/crds/vshn.appcat.vshn.io_vshnpostgresqls.yaml @@ -5316,6 +5316,9 @@ spec: type: string type: object type: array + currentVersion: + description: CurrentVersion contains the current version of PostgreSQL. + type: string instanceNamespace: description: InstanceNamespace contains the name of the namespace where the instance resides type: string @@ -5619,6 +5622,9 @@ spec: type: string type: object type: array + previousVersion: + description: PreviousVersion contains the previous version of PostgreSQL. + type: string profileConditions: items: properties: diff --git a/crds/vshn.appcat.vshn.io_xvshnpostgresqls.yaml b/crds/vshn.appcat.vshn.io_xvshnpostgresqls.yaml index 8fb991e3ea..ad36103771 100644 --- a/crds/vshn.appcat.vshn.io_xvshnpostgresqls.yaml +++ b/crds/vshn.appcat.vshn.io_xvshnpostgresqls.yaml @@ -6053,6 +6053,9 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + currentVersion: + description: CurrentVersion contains the current version of PostgreSQL. + type: string instanceNamespace: description: InstanceNamespace contains the name of the namespace where the instance resides @@ -6383,6 +6386,9 @@ spec: type: string type: object type: array + previousVersion: + description: PreviousVersion contains the previous version of PostgreSQL. 
+ type: string profileConditions: items: properties: diff --git a/pkg/maintenance/auth/http.go b/pkg/auth/http.go similarity index 99% rename from pkg/maintenance/auth/http.go rename to pkg/auth/http.go index 06c47fac49..7890e30b3f 100644 --- a/pkg/maintenance/auth/http.go +++ b/pkg/auth/http.go @@ -1,10 +1,9 @@ package auth import ( + "k8s.io/client-go/transport" "net/http" "time" - - "k8s.io/client-go/transport" ) // GetAuthHTTPClient returns a HTTP client which is authenticated. It can be used to query private images. diff --git a/pkg/auth/stackgres/client.go b/pkg/auth/stackgres/client.go new file mode 100644 index 0000000000..3b167177e3 --- /dev/null +++ b/pkg/auth/stackgres/client.go @@ -0,0 +1,121 @@ +package stackgres + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "github.com/hashicorp/go-version" + "net/http" + "sort" +) + +type loginRequest struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` +} + +type authToken struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` +} + +// PgVersions contains available postgres versions +type PgVersions struct { + Postgresql []string `json:"postgresql"` +} + +// StackgresClient creates a client to connect to Stackgres Operator +type StackgresClient struct { + username, password, sgNamespace, prefixUrl string + httpClient *http.Client + token authToken +} + +// New creates a Stackgres client from username, password and namespace where Stackgres is running +func New(username, password, sgNamespace string) (*StackgresClient, error) { + t := http.DefaultTransport.(*http.Transport).Clone() + t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + httpClient := &http.Client{Transport: t} + + auth := loginRequest{Username: username, Password: password} + + byteAuth, err := json.Marshal(auth) + if err != nil { + return nil, fmt.Errorf("cannot marshal login json: %w", err) + } + prefixUrl := "https://stackgres-restapi." 
+ sgNamespace + ".svc.cluster.local" + resp, err := httpClient.Post(prefixUrl+"/stackgres/auth/login", "application/json", bytes.NewBuffer(byteAuth)) + if err != nil { + return nil, fmt.Errorf("cannot login: %w", err) + } + + token := &authToken{} + err = json.NewDecoder(resp.Body).Decode(token) + if err != nil { + return nil, fmt.Errorf("cannot decode login token: %w", err) + } + + return &StackgresClient{ + sgNamespace: sgNamespace, + username: username, + password: password, + prefixUrl: prefixUrl, + httpClient: httpClient, + token: *token, + }, nil +} + +// GetAvailableVersions fetches all available versions +func (c StackgresClient) GetAvailableVersions() (*PgVersions, error) { + req, err := http.NewRequest("GET", c.prefixUrl+"/stackgres/version/postgresql", nil) + if err != nil { + return nil, fmt.Errorf("cannot get list of versions: %w", err) + } + req.Header.Add("Authorization", "Bearer "+c.token.AccessToken) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("error during http request: %w", err) + } + versionList := &PgVersions{} + + err = json.NewDecoder(resp.Body).Decode(versionList) + if err != nil { + return nil, fmt.Errorf("error during json decoding: %w", err) + } + return versionList, nil +} + +// GetLatestMinorVersion searching most current minor version +func GetLatestMinorVersion(vers string, versionList *PgVersions) (string, error) { + + if versionList == nil { + return vers, nil + } + + current, err := version.NewVersion(vers) + if err != nil { + return "", err + } + + validVersions := make([]*version.Version, 0) + for _, newVersion := range versionList.Postgresql { + tmpVersion, err := version.NewVersion(newVersion) + if err != nil { + return "", err + } + if tmpVersion.Segments()[0] == current.Segments()[0] { + validVersions = append(validVersions, tmpVersion) + } + } + + sort.Sort(sort.Reverse(version.Collection(validVersions))) + + if len(validVersions) != 0 && current.LessThan(validVersions[0]) { + return validVersions[0].Original(), nil + } + + return current.Original(), nil +} diff --git a/pkg/auth/stackgres/client_test.go b/pkg/auth/stackgres/client_test.go new file mode 100644 index 0000000000..673437d672 --- /dev/null +++ b/pkg/auth/stackgres/client_test.go @@ -0,0 +1,78 @@ +package stackgres + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestGetLatestMinorVersion(t *testing.T) { + tests := []struct { + name string + inputVersion string + versionList *PgVersions + expected string + expectedError string + }{ + { + name: "No version list provided", + inputVersion: "14", + versionList: nil, + expected: "14", + }, + { + name: "Empty version list", + inputVersion: "14", + versionList: &PgVersions{Postgresql: []string{}}, + expected: "14", + }, + { + name: "Single matching minor version", + inputVersion: "14", + versionList: &PgVersions{Postgresql: []string{"14.3"}}, + expected: "14.3", + }, + { + name: "Multiple minor versions, latest chosen", + inputVersion: "14", + versionList: &PgVersions{Postgresql: []string{"14.1", "14.3", "14.4"}}, + expected: "14.4", + }, + { + name: "No matching major version", + inputVersion: "14", + versionList: &PgVersions{Postgresql: []string{"13.5", "15.1"}}, + expected: "14", + }, + { + name: "Minor version is 0", + inputVersion: "14", + versionList: &PgVersions{Postgresql: []string{"13.3", "14.0"}}, + expected: "14", + }, + { + name: "Invalid input version format", + inputVersion: "invalid-version", + versionList: &PgVersions{Postgresql: []string{"14.3", "14.4"}}, + 
expectedError: "Malformed version: invalid-version", + }, + { + name: "Invalid version in version list", + inputVersion: "14.2", + versionList: &PgVersions{Postgresql: []string{"14.3", "invalid-version"}}, + expectedError: "Malformed version: invalid-version", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := GetLatestMinorVersion(tt.inputVersion, tt.versionList) + + if tt.expectedError != "" { + assert.EqualError(t, err, tt.expectedError) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expected, result) + } + }) + } +} diff --git a/pkg/comp-functions/functions/vshnpostgres/delay_cluster.go b/pkg/comp-functions/functions/vshnpostgres/delay_cluster.go index ebca971b78..052cab9e12 100644 --- a/pkg/comp-functions/functions/vshnpostgres/delay_cluster.go +++ b/pkg/comp-functions/functions/vshnpostgres/delay_cluster.go @@ -48,7 +48,7 @@ func DelayClusterDeployment(_ context.Context, comp *vshnv1.VSHNPostgreSQL, svc return runtime.NewWarningResult("SGObjectStorage is not yet ready, skipping creation of cluster") } - if !kubeObjectSyncedAndReady(comp.GetName()+"-"+configResourceName, svc) { + if !kubeObjectSyncedAndReady(fmt.Sprintf("%s-%s-%s", comp.GetName(), configResourceName, comp.Status.CurrentVersion), svc) { return runtime.NewWarningResult("SGPostgresConfig is not yet ready, skipping creation of cluster") } diff --git a/pkg/comp-functions/functions/vshnpostgres/loadBalancer.go b/pkg/comp-functions/functions/vshnpostgres/load_balancer.go similarity index 100% rename from pkg/comp-functions/functions/vshnpostgres/loadBalancer.go rename to pkg/comp-functions/functions/vshnpostgres/load_balancer.go diff --git a/pkg/comp-functions/functions/vshnpostgres/loadBalancer_test.go b/pkg/comp-functions/functions/vshnpostgres/load_balancer_test.go similarity index 100% rename from pkg/comp-functions/functions/vshnpostgres/loadBalancer_test.go rename to pkg/comp-functions/functions/vshnpostgres/load_balancer_test.go diff --git a/pkg/comp-functions/functions/vshnpostgres/maintenance.go b/pkg/comp-functions/functions/vshnpostgres/maintenance.go index 79ef7a9a6d..31614101aa 100644 --- a/pkg/comp-functions/functions/vshnpostgres/maintenance.go +++ b/pkg/comp-functions/functions/vshnpostgres/maintenance.go @@ -19,6 +19,8 @@ import ( "k8s.io/utils/ptr" ) +const stackgresCredObserver = "stackgres-creds-observer" + var ( maintSecretName = "maintenancesecret" service = "postgresql" @@ -214,7 +216,7 @@ func addStackgresCredentialsObserver(svc *runtime.ServiceRuntime, comp *vshnv1.V }, } - err := svc.SetDesiredKubeObject(stackgresCredentials, comp.GetName()+"-stackgres-creds-observer", runtime.KubeOptionObserve) + err := svc.SetDesiredKubeObject(stackgresCredentials, fmt.Sprintf("%s-%s", comp.GetName(), stackgresCredObserver), runtime.KubeOptionObserve) if err != nil { return fmt.Errorf("cannot deploy stackgres credentials observer: %w", err) } diff --git a/pkg/comp-functions/functions/vshnpostgres/major_version_upgrade.go b/pkg/comp-functions/functions/vshnpostgres/major_version_upgrade.go new file mode 100644 index 0000000000..2166d15548 --- /dev/null +++ b/pkg/comp-functions/functions/vshnpostgres/major_version_upgrade.go @@ -0,0 +1,204 @@ +package vshnpostgres + +import ( + "context" + "errors" + "fmt" + xfnproto "github.com/crossplane/function-sdk-go/proto/v1beta1" + stackgresv1 "github.com/vshn/appcat/v4/apis/stackgres/v1" + vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1" + "github.com/vshn/appcat/v4/pkg/auth/stackgres" + 
"github.com/vshn/appcat/v4/pkg/comp-functions/runtime" + v2 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pointer "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" +) + +const majorUpgradeSuffix = "major-upgrade-dbops" + +// MajorVersionUpgrade upgrades next major postgres version. The latest minor version is applied +func MajorVersionUpgrade(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) *xfnproto.Result { + log := ctrl.LoggerFrom(ctx) + comp, err := getVSHNPostgreSQL(ctx, svc) + + if err != nil { + return runtime.NewWarningResult(fmt.Sprintf("cannot get composite from function io: %v", err)) + } + + expectedV := comp.Spec.Parameters.Service.MajorVersion + currentV := comp.Status.CurrentVersion + + // If current and expected versions do not match then issue a major version upgrade via SGDBOps resource + if currentV != "" && currentV != expectedV { + log.Info("detected major version upgrade") + err = upgradePGSettings(comp, svc) + if err != nil { + log.Error(err, "failed to upgrade PG settings during major version upgrade") + return runtime.NewWarningResult(fmt.Sprintf("cannot upgrade PG settings: %v", err)) + } + majorUpgradeDbOps, err := getSgDbOpsIfExists(err, svc, comp) + if err != nil { + log.Error(err, "failed to get major upgrade sgdbops resource") + return runtime.NewWarningResult(fmt.Sprintf("cannot get major upgrade SgDbOps resource: %v", err)) + } + + // If SGDBOps resource does not exist create it and keep during upgrade otherwise cleanup if successful + if majorUpgradeDbOps == nil { + log.Info("Creating SgDbOps major version upgrade resource") + return createMajorUpgradeSgDbOps(ctx, svc, comp, expectedV) + } else if isSuccessful(majorUpgradeDbOps.Status.Conditions) { + log.Info("Major version upgrade successfully completed") + updateStatusVersion(svc, comp, expectedV) + } + return keepSgDbOpsResource(svc, comp, majorUpgradeDbOps) + } + log.Info("No major version upgrade detected") + return nil +} + +// upgradePGSettings creates a new SGPostgresConfig resource with the new expected major +func upgradePGSettings(comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) error { + conf := &stackgresv1.SGPostgresConfig{} + err := svc.GetObservedKubeObject(conf, fmt.Sprintf("%s-%s-%s", comp.GetName(), configResourceName, comp.Status.CurrentVersion)) + if err != nil { + // For compatibility purposes the old PG settings has to be considered + err = svc.GetObservedKubeObject(conf, fmt.Sprintf("%s-%s", comp.GetName(), configResourceName)) + if err != nil { + return fmt.Errorf("cannot get observed kube object postgres config: %v", err) + } + } + + expectedV := comp.Spec.Parameters.Service.MajorVersion + upgradeVersionConfig := &stackgresv1.SGPostgresConfig{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-postgres-config-%s", comp.GetName(), expectedV), + Namespace: comp.GetInstanceNamespace(), + }, + Spec: stackgresv1.SGPostgresConfigSpec{ + PostgresVersion: expectedV, + PostgresqlConf: conf.Spec.PostgresqlConf, + }, + } + err = svc.SetDesiredKubeObject(upgradeVersionConfig, fmt.Sprintf("%s-%s-%s", comp.GetName(), configResourceName, expectedV)) + if err != nil { + return fmt.Errorf("cannot create current version postgres config: %v", err) + } + return nil +} + +// getSgDbOpsIfExists returns the existing major upgrade SgDbOps resource if upgrade is in process +func getSgDbOpsIfExists(err error, svc *runtime.ServiceRuntime, comp *vshnv1.VSHNPostgreSQL) (*stackgresv1.SGDbOps, error) { + majorUpgradeDbOps := 
&stackgresv1.SGDbOps{} + err = svc.GetObservedKubeObject(majorUpgradeDbOps, fmt.Sprintf("%s-%s", comp.GetName(), majorUpgradeSuffix)) + if err != nil && !errors.Is(err, runtime.ErrNotFound) { + return nil, fmt.Errorf("cannot get observed kube object major upgrade sgdbops: %v", err) + } + if errors.Is(err, runtime.ErrNotFound) { + return nil, nil + } + return majorUpgradeDbOps, nil +} + +// keepSgDbOpsResource saves the major upgrade SgDbOps for the duration of the major upgrade +func keepSgDbOpsResource(svc *runtime.ServiceRuntime, comp *vshnv1.VSHNPostgreSQL, majorUpgradeDbOps *stackgresv1.SGDbOps) *xfnproto.Result { + err := svc.SetDesiredKubeObject(majorUpgradeDbOps, fmt.Sprintf("%s-%s", comp.GetName(), majorUpgradeSuffix), runtime.KubeOptionAllowDeletion) + if err != nil { + return runtime.NewWarningResult(fmt.Sprintf("cannot keep major upgrade kube object %s", comp.GetName())) + } + return runtime.NewWarningResult("Major upgrade is not completed or it failed") +} + +// updateStatusVersion rotates the status versions in the composite +func updateStatusVersion(svc *runtime.ServiceRuntime, comp *vshnv1.VSHNPostgreSQL, v string) *xfnproto.Result { + comp.Status.PreviousVersion = comp.Status.CurrentVersion + comp.Status.CurrentVersion = v + err := svc.SetDesiredCompositeStatus(comp) + if err != nil { + return runtime.NewWarningResult(fmt.Sprintf("cannot update status field with the newest major postgres version: %v", err)) + } + return runtime.NewNormalResult("Major upgrade successfully finished, SGDBOps cleaned up") +} + +// isSuccessful checks whether the major version upgrade was successful +func isSuccessful(conditions *[]stackgresv1.SGDbOpsStatusConditionsItem) bool { + if conditions == nil { + return false + } + + var hasCompleted, hasFailed bool + for _, c := range *conditions { + switch { + case *c.Reason == "OperationFailed" && *c.Status == "True": + hasFailed = true + case *c.Reason == "OperationCompleted" && *c.Status == "True": + hasCompleted = true + } + if hasFailed { + return false + } + } + + return hasCompleted +} + +// createMajorUpgradeSgDbOps create a major upgrade SgDbOps resource to start the upgrade process +func createMajorUpgradeSgDbOps(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VSHNPostgreSQL, expectedV string) *xfnproto.Result { + log := ctrl.LoggerFrom(ctx) + cluster := &stackgresv1.SGCluster{} + err := svc.GetObservedKubeObject(cluster, "cluster") + if err != nil { + log.Error(err, "cannot get observed sgcluster object") + return runtime.NewWarningResult(fmt.Sprintf("cannot get observed kube object cluster: %v", err)) + } + + sgNamespace := svc.Config.Data["sgNamespace"] + stacgresRestApi := &v2.Secret{} + err = svc.GetObservedKubeObject(stacgresRestApi, fmt.Sprintf("%s-%s", comp.GetName(), stackgresCredObserver)) + if err != nil { + log.Error(err, "cannot get username and password") + return runtime.NewWarningResult(fmt.Sprintf("cannot get observed stackgres-restapi-admin secret: %v", err)) + } + + stackgresClient, err := stackgres.New(string(stacgresRestApi.Data["k8sUsername"]), string(stacgresRestApi.Data["clearPassword"]), sgNamespace) + if err != nil { + log.Error(err, "cannot create stackgres client") + return runtime.NewWarningResult(fmt.Sprintf("cannot initialize stackgres client: %v", err)) + } + + vList, err := stackgresClient.GetAvailableVersions() + if err != nil { + log.Error(err, "cannot get postgres available versions") + return runtime.NewWarningResult(fmt.Sprintf("cannot get postgres version list: %v", err)) + } + + 
majorMinorVersion, err := stackgres.GetLatestMinorVersion(expectedV, vList) + if err != nil { + log.Error(err, "cannot get the latest minor postgres version") + return runtime.NewWarningResult(fmt.Sprintf("cannot get latest minor version: %v", err)) + } + + sgdbops := &stackgresv1.SGDbOps{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", comp.GetName(), majorUpgradeSuffix), + Namespace: comp.GetInstanceNamespace(), + }, + Spec: stackgresv1.SGDbOpsSpec{ + MajorVersionUpgrade: &stackgresv1.SGDbOpsSpecMajorVersionUpgrade{ + Link: pointer.To(true), + PostgresVersion: &majorMinorVersion, + SgPostgresConfig: pointer.To(fmt.Sprintf("%s-postgres-config-%s", comp.GetName(), expectedV)), + }, + Op: "majorVersionUpgrade", + SgCluster: cluster.GetName(), + }, + } + + err = svc.SetDesiredKubeObject(sgdbops, fmt.Sprintf("%s-%s", comp.GetName(), majorUpgradeSuffix), runtime.KubeOptionAllowDeletion) + if err != nil { + log.Error(err, "cannot set desired major postgres version sgdbops resource") + return runtime.NewWarningResult(fmt.Sprintf("cannot create major upgrade kube object %s", comp.GetName())) + } + + return runtime.NewNormalResult("SGDBOps for major upgrade created") +} diff --git a/pkg/comp-functions/functions/vshnpostgres/major_version_upgrade_test.go b/pkg/comp-functions/functions/vshnpostgres/major_version_upgrade_test.go new file mode 100644 index 0000000000..bd5c6e33d4 --- /dev/null +++ b/pkg/comp-functions/functions/vshnpostgres/major_version_upgrade_test.go @@ -0,0 +1,72 @@ +package vshnpostgres + +import ( + sgv1 "github.com/vshn/appcat/v4/apis/stackgres/v1" + "k8s.io/utils/ptr" + "testing" +) + +func Test_IsSuccessful(t *testing.T) { + tests := []struct { + name string + conditions *[]sgv1.SGDbOpsStatusConditionsItem + expected bool + }{ + { + name: "Nil conditions", + conditions: nil, + expected: false, + }, + { + name: "Empty conditions", + conditions: &[]sgv1.SGDbOpsStatusConditionsItem{}, + expected: false, + }, + { + name: "OperationCompleted is True, no OperationFailed", + conditions: &[]sgv1.SGDbOpsStatusConditionsItem{ + {Reason: ptr.To("OperationCompleted"), Status: ptr.To("True")}, + }, + expected: true, + }, + { + name: "OperationFailed is True", + conditions: &[]sgv1.SGDbOpsStatusConditionsItem{ + {Reason: ptr.To("OperationFailed"), Status: ptr.To("True")}, + }, + expected: false, + }, + { + name: "Both OperationCompleted and OperationFailed are True", + conditions: &[]sgv1.SGDbOpsStatusConditionsItem{ + {Reason: ptr.To("OperationCompleted"), Status: ptr.To("True")}, + {Reason: ptr.To("OperationFailed"), Status: ptr.To("True")}, + }, + expected: false, + }, + { + name: "OperationRunning is True", + conditions: &[]sgv1.SGDbOpsStatusConditionsItem{ + {Reason: ptr.To("OperationRunning"), Status: ptr.To("True")}, + }, + expected: false, + }, + { + name: "OperationCompleted is True, other conditions exist", + conditions: &[]sgv1.SGDbOpsStatusConditionsItem{ + {Reason: ptr.To("OperationCompleted"), Status: ptr.To("True")}, + {Reason: ptr.To("OperationNotRunning"), Status: ptr.To("True")}, + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isSuccessful(tt.conditions) + if result != tt.expected { + t.Errorf("isSuccessful() = %v, want %v", result, tt.expected) + } + }) + } +} diff --git a/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go b/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go index ddf30cbf45..53dff99b72 100644 --- 
a/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go
+++ b/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go
@@ -51,6 +51,11 @@ func DeployPostgreSQL(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *run
 if err != nil {
 return runtime.NewWarningResult(fmt.Errorf("cannot bootstrap instance namespace: %w", err).Error())
 }
+ l.Info("Set major version in status")
+ err = setMajorVersionStatus(comp, svc)
+ if err != nil {
+ return runtime.NewWarningResult(fmt.Errorf("cannot set major version in status: %w", err).Error())
+ }

 l.Info("Create tls certificate")
 err = createCerts(comp, svc)
@@ -92,6 +97,16 @@ func DeployPostgreSQL(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *run
 return nil
 }

+// setMajorVersionStatus sets the version in the status only when the instance is first provisioned.
+// Subsequent updates of this field happen in the MajorVersionUpgrade comp-function.
+func setMajorVersionStatus(comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) error {
+ if comp.Status.CurrentVersion == "" {
+ comp.Status.CurrentVersion = comp.Spec.Parameters.Service.MajorVersion
+ return svc.SetDesiredCompositeStatus(comp)
+ }
+ return nil
+}
+
 func createCerts(comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) error {
 selfSignedIssuer := &cmv1.Issuer{
 ObjectMeta: metav1.ObjectMeta{
@@ -224,6 +239,7 @@ func createSgInstanceProfile(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, s
 "setupArbitraryUser": "setup-arbitrary-user",
 "clusterReconciliationCycle": "cluster-reconciliation-cycle",
 "setDbopsRunning": "dbops.set-dbops-running",
+ "setMajorVersionUpgrade": "major-version-upgrade",
 }

 res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
@@ -324,26 +340,47 @@ func createSgPostgresConfig(comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRun
 }
 }

- sgPostgresConfig := &sgv1.SGPostgresConfig{
+ pgConfigName, pgKubeName := getCurrentSettings(comp, svc, comp.Status.CurrentVersion, comp.Status.PreviousVersion)
+
+ currentVersionConfig := sgv1.SGPostgresConfig{
 ObjectMeta: metav1.ObjectMeta{
- Name: comp.GetName(),
+ Name: pgConfigName,
 Namespace: comp.GetInstanceNamespace(),
 },
 Spec: sgv1.SGPostgresConfigSpec{
- PostgresVersion: comp.Spec.Parameters.Service.MajorVersion,
+ PostgresVersion: comp.Status.CurrentVersion,
 PostgresqlConf: pgConf,
 },
 }

- err := svc.SetDesiredKubeObject(sgPostgresConfig, comp.GetName()+"-"+configResourceName)
+ err := svc.SetDesiredKubeObject(&currentVersionConfig, pgKubeName, runtime.KubeOptionAllowDeletion)
 if err != nil {
- err = fmt.Errorf("cannot create sgInstanceProfile: %w", err)
- return err
+ return fmt.Errorf("cannot create current version postgres config: %w", err)
 }

 return nil
 }

+// getCurrentSettings returns the kube object name pgKubeName and the wrapped resource name pgConfigName.
+// This function ensures compatibility with older PostgreSQL instances where the SGPostgresConfig resource name was missing the major version.
+// If such a resource exists (e.g. 
pg-instance-cntqx) then it will be kept with this naming until a new major version upgrade is issued +// New postgres instances have the format: +// Kube name - pg-instance-pg-conf-14 +// Resource name - pg-instance-postgres-config-14 +func getCurrentSettings(comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime, currentV, previousV string) (string, string) { + pgConfigName := fmt.Sprintf("%s-postgres-config-%s", comp.GetName(), currentV) + pgKubeName := fmt.Sprintf("%s-%s-%s", comp.GetName(), configResourceName, currentV) + + existingProfile := &sgv1.SGPostgresConfig{} + _ = svc.GetObservedKubeObject(existingProfile, fmt.Sprintf("%s-%s", comp.GetName(), configResourceName)) + + if existingProfile.Name != "" && previousV == "" { + pgKubeName = fmt.Sprintf("%s-%s", comp.GetName(), configResourceName) + pgConfigName = existingProfile.Name + } + return pgConfigName, pgKubeName +} + func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) error { l := svc.Log @@ -366,6 +403,8 @@ func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runt return fmt.Errorf("cannot fetch nodeSelector from the composition config: %w", err) } + pgConfigName, _ := getCurrentSettings(comp, svc, comp.Status.CurrentVersion, comp.Status.PreviousVersion) + initialData := &sgv1.SGClusterSpecInitialData{} backupRef := xkubev1.Reference{} if comp.Spec.Parameters.Restore != nil && comp.Spec.Parameters.Restore.BackupName != "" { @@ -388,7 +427,7 @@ func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runt Instances: comp.Spec.Parameters.Instances, SgInstanceProfile: ptr.To(comp.GetName()), Configurations: &sgv1.SGClusterSpecConfigurations{ - SgPostgresConfig: ptr.To(comp.GetName()), + SgPostgresConfig: ptr.To(pgConfigName), Backups: &[]sgv1.SGClusterSpecConfigurationsBackupsItem{ { SgObjectStorage: "sgbackup-" + comp.GetName(), @@ -398,7 +437,7 @@ func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runt }, InitialData: initialData, Postgres: sgv1.SGClusterSpecPostgres{ - Version: comp.Spec.Parameters.Service.MajorVersion, + Version: comp.Status.CurrentVersion, }, Pods: sgv1.SGClusterSpecPods{ PersistentVolume: sgv1.SGClusterSpecPodsPersistentVolume{ @@ -444,7 +483,7 @@ func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runt // are referenced in the kube object. This will lead to the object getting stuck indefinitely. 
err = svc.SetDesiredKubeObjectWithName(sgCluster, comp.GetName()+"-cluster", "cluster", runtime.KubeOptionAddRefs(backupRef), runtime.KubeOptionProtects(namespaceResName)) if err != nil { - err = fmt.Errorf("cannot create sgInstanceProfile: %w", err) + err = fmt.Errorf("cannot create sgCluster: %w", err) return err } diff --git a/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy_test.go b/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy_test.go index 10ad2f7084..328151049b 100644 --- a/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy_test.go +++ b/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy_test.go @@ -3,6 +3,7 @@ package vshnpostgres import ( "context" "encoding/json" + "fmt" "testing" "time" @@ -61,7 +62,7 @@ func TestPostgreSqlDeploy(t *testing.T) { assert.Equal(t, comp.Spec.Parameters.Instances, cluster.Spec.Instances) assert.Nil(t, cluster.Spec.InitialData.Restore) assert.Equal(t, comp.GetName(), *cluster.Spec.SgInstanceProfile) - assert.Equal(t, comp.GetName(), *cluster.Spec.Configurations.SgPostgresConfig) + assert.Equal(t, fmt.Sprintf("%s-postgres-config-%s", comp.GetName(), comp.Spec.Parameters.Service.MajorVersion), *cluster.Spec.Configurations.SgPostgresConfig) backups := *cluster.Spec.Configurations.Backups assert.Equal(t, "sgbackup-"+comp.GetName(), backups[0].SgObjectStorage) assert.Equal(t, comp.Spec.Parameters.Backup.Schedule, *(backups[0].CronSchedule)) @@ -82,7 +83,7 @@ func TestPostgreSqlDeploy(t *testing.T) { assert.Nil(t, sgInstanceProfile.Spec.HugePages) sgPostgresConfig := &sgv1.SGPostgresConfig{} - assert.NoError(t, svc.GetDesiredKubeObject(sgPostgresConfig, comp.GetName()+"-"+configResourceName)) + assert.NoError(t, svc.GetDesiredKubeObject(sgPostgresConfig, comp.GetName()+"-"+configResourceName+"-15")) assert.Equal(t, comp.Spec.Parameters.Service.MajorVersion, sgPostgresConfig.Spec.PostgresVersion) assert.Equal(t, map[string]string{}, sgPostgresConfig.Spec.PostgresqlConf) @@ -113,7 +114,7 @@ func TestPostgreSqlDeployWithPgConfig(t *testing.T) { assert.NoError(t, svc.GetDesiredKubeObject(cluster, "cluster")) sgPostgresConfig := &sgv1.SGPostgresConfig{} - assert.NoError(t, svc.GetDesiredKubeObject(sgPostgresConfig, "pgsql-gc9x4-"+configResourceName)) + assert.NoError(t, svc.GetDesiredKubeObject(sgPostgresConfig, "pgsql-gc9x4-"+configResourceName+"-15")) assert.Contains(t, sgPostgresConfig.Spec.PostgresqlConf, "timezone") assert.Equal(t, "Europe/Zurich", sgPostgresConfig.Spec.PostgresqlConf["timezone"]) } diff --git a/pkg/comp-functions/functions/vshnpostgres/register.go b/pkg/comp-functions/functions/vshnpostgres/register.go index 46343c0471..0f986cd5c4 100644 --- a/pkg/comp-functions/functions/vshnpostgres/register.go +++ b/pkg/comp-functions/functions/vshnpostgres/register.go @@ -84,6 +84,10 @@ func init() { Name: "custom-exporter-configs", Execute: PgExporterConfig, }, + { + Name: "major-version-upgrade", + Execute: MajorVersionUpgrade, + }, }, }) } diff --git a/pkg/comp-functions/functions/vshnpostgres/restart.go b/pkg/comp-functions/functions/vshnpostgres/restart.go index 6c48fa36d2..34fab76b34 100644 --- a/pkg/comp-functions/functions/vshnpostgres/restart.go +++ b/pkg/comp-functions/functions/vshnpostgres/restart.go @@ -38,6 +38,10 @@ func transformRestart(ctx context.Context, svc *runtime.ServiceRuntime, now func return nil } + if isMajorUpgradeRunning(comp) { + return nil + } + restartTime, err := getPendingRestart(ctx, svc) if err != nil { return runtime.NewWarningResult(err.Error()) @@ -55,6 +59,10 @@ func 
transformRestart(ctx context.Context, svc *runtime.ServiceRuntime, now func return nil } +func isMajorUpgradeRunning(comp *vshnv1.VSHNPostgreSQL) bool { + return comp.Spec.Parameters.Service.MajorVersion != comp.Status.CurrentVersion +} + func getPendingRestart(ctx context.Context, svc *runtime.ServiceRuntime) (time.Time, error) { cluster := sgv1.SGCluster{} diff --git a/pkg/comp-functions/runtime/function_mgr.go b/pkg/comp-functions/runtime/function_mgr.go index f7865576b6..93a574392a 100644 --- a/pkg/comp-functions/runtime/function_mgr.go +++ b/pkg/comp-functions/runtime/function_mgr.go @@ -456,7 +456,7 @@ func ComposedOptionProtects(resName string) ComposedResourceOption { } // SetDesiredKubeObject takes any `runtime.Object`, puts it into a provider-kubernetes Object and then -// adds it to the desired composed resources. It takes options to manipulate the resulting kubec object before applying. +// adds it to the desired composed resources. It takes options to manipulate the resulting kube object before applying. func (s *ServiceRuntime) SetDesiredKubeObject(obj client.Object, objectName string, opts ...KubeObjectOption) error { kobj, err := s.putIntoObject(obj, objectName, objectName) diff --git a/pkg/controller/webhooks/postgresql.go b/pkg/controller/webhooks/postgresql.go index 21c50f4f92..2a2a442089 100644 --- a/pkg/controller/webhooks/postgresql.go +++ b/pkg/controller/webhooks/postgresql.go @@ -4,17 +4,18 @@ import ( "context" "encoding/json" "fmt" - - vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1" "github.com/vshn/appcat/v4/pkg/common/quotas" "github.com/vshn/appcat/v4/pkg/common/utils" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "strconv" + + vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -28,39 +29,40 @@ import ( //+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;patch;update;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;patch;update;delete +const ( + maxResourceNameLength = 30 +) + var ( pgGK = schema.GroupKind{Group: "vshn.appcat.vshn.io", Kind: "VSHNPostgreSQL"} pgGR = schema.GroupResource{Group: pgGK.Group, Resource: "vshnpostgresqls"} -) -var _ webhook.CustomValidator = &PostgreSQLWebhookHandler{} - -var blocklist = map[string]string{ - "listen_addresses": "", - "port": "", - "cluster_name": "", - "hot_standby": "", - "fsync": "", - "full_page_writes": "", - "log_destination": "", - "logging_collector": "", - "max_replication_slots": "", - "max_wal_senders": "", - "wal_keep_segments": "", - "wal_level": "", - "wal_log_hints": "", - "archive_mode": "", - "archive_command": "", -} + _ webhook.CustomValidator = &PostgreSQLWebhookHandler{} + + blocklist = map[string]string{ + "listen_addresses": "", + "port": "", + "cluster_name": "", + "hot_standby": "", + "fsync": "", + "full_page_writes": "", + "log_destination": "", + "logging_collector": "", + "max_replication_slots": "", + "max_wal_senders": "", + "wal_keep_segments": "", + "wal_level": "", + "wal_log_hints": "", + "archive_mode": "", + "archive_command": "", + } +) -// PostgreSQLWebhookHandler handles all quota webhooks concerning postgresql by vshn. 
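+// PostgreSQLWebhookHandler handles the validation of VSHNPostgreSQL objects.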
type PostgreSQLWebhookHandler struct { DefaultWebhookHandler } -// SetupPostgreSQLWebhookHandlerWithManager registers the validation webhook with the manager. func SetupPostgreSQLWebhookHandlerWithManager(mgr ctrl.Manager, withQuota bool) error { - return ctrl.NewWebhookManagedBy(mgr). For(&vshnv1.VSHNPostgreSQL{}). WithValidator(&PostgreSQLWebhookHandler{ @@ -77,145 +79,75 @@ func SetupPostgreSQLWebhookHandlerWithManager(mgr ctrl.Manager, withQuota bool) Complete() } -// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type func (p *PostgreSQLWebhookHandler) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - allErrs := field.ErrorList{} - pg, ok := obj.(*vshnv1.VSHNPostgreSQL) - if !ok { - return nil, fmt.Errorf("provided manifest is not a valid VSHNPostgreSQL object") - } - - err := validateVacuumRepack(pg.Spec.Parameters.Service.VacuumEnabled, pg.Spec.Parameters.Service.RepackEnabled) - if err != nil { - allErrs = append(allErrs, &field.Error{ - Field: "spec.parameters.service", - Detail: fmt.Sprintf("pg.Spec.Parameters.Service.VacuumEnabled and pg.Spec.Parameters.Service.RepackEnabled settings can't be both disabled: %s", err.Error()), - Type: field.ErrorTypeForbidden, - }) - } - - if p.withQuota { - quotaErrs, fieldErrs := p.checkPostgreSQLQuotas(ctx, pg, true) - if quotaErrs != nil { - allErrs = append(allErrs, &field.Error{ - Field: "quota", - Detail: fmt.Sprintf("quota check failed: %s", - quotaErrs.Error()), - BadValue: "*your namespace quota*", - Type: field.ErrorTypeForbidden, - }) - } - allErrs = append(allErrs, fieldErrs...) - } - - instancesError := p.checkGuaranteedAvailability(pg) - - allErrs = append(allErrs, instancesError...) - - // longest postfix is 26 chars for the sgbackup object (eg. "-952zx-2024-07-25-12-50-10"). Max SgBackup length is 56, therefore 30 characters is the maximum length - err = p.validateResourceNameLength(pg.GetName(), 30) - if err != nil { - allErrs = append(allErrs, &field.Error{ - Field: ".metadata.name", - Detail: fmt.Sprintf("Please shorten PostgreSQL name to 30 characters or less: %s", - err.Error()), - BadValue: pg.GetName(), - Type: field.ErrorTypeTooLong, - }) - } - - errList := validatePgConf(pg) - if errList != nil { - allErrs = append(allErrs, errList...) 
- } - - if len(allErrs) != 0 { - return nil, apierrors.NewInvalid( - pgGK, - pg.GetName(), - allErrs, - ) - } - - return nil, nil + return p.validatePostgreSQL(ctx, obj, nil, true) } -// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type func (p *PostgreSQLWebhookHandler) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + return p.validatePostgreSQL(ctx, newObj, oldObj, false) +} +func (p *PostgreSQLWebhookHandler) validatePostgreSQL(ctx context.Context, newObj, oldObj runtime.Object, isCreate bool) (admission.Warnings, error) { allErrs := field.ErrorList{} - pg, ok := newObj.(*vshnv1.VSHNPostgreSQL) + newPg, ok := newObj.(*vshnv1.VSHNPostgreSQL) if !ok { return nil, fmt.Errorf("provided manifest is not a valid VSHNPostgreSQL object") } - if pg.DeletionTimestamp != nil { - return nil, nil - } - - err := validateVacuumRepack(pg.Spec.Parameters.Service.VacuumEnabled, pg.Spec.Parameters.Service.RepackEnabled) - if err != nil { - allErrs = append(allErrs, &field.Error{ - Field: "spec.parameters.service", - Detail: fmt.Sprintf("pg.Spec.Parameters.Service.VacuumEnabled and pg.Spec.Parameters.Service.RepackEnabled settings can't be both disabled: %s", err.Error()), - Type: field.ErrorTypeForbidden, - }) + // Validate Vacuum and Repack settings + if err := validateVacuumRepack(newPg.Spec.Parameters.Service.VacuumEnabled, newPg.Spec.Parameters.Service.RepackEnabled); err != nil { + allErrs = append(allErrs, err) } + // Validate quotas if enabled if p.withQuota { - quotaErrs, fieldErrs := p.checkPostgreSQLQuotas(ctx, pg, false) + quotaErrs, fieldErrs := p.checkPostgreSQLQuotas(ctx, newPg, isCreate) if quotaErrs != nil { - allErrs = append(allErrs, &field.Error{ - Field: "quota", - Detail: fmt.Sprintf("quota check failed: %s", - quotaErrs.Error()), - BadValue: "*your namespace quota*", - Type: field.ErrorTypeForbidden, - }) + allErrs = append(allErrs, field.Forbidden(field.NewPath("quota"), fmt.Sprintf("quota check failed: %s", quotaErrs.Error()))) } allErrs = append(allErrs, fieldErrs...) } - instancesError := p.checkGuaranteedAvailability(pg) - allErrs = append(allErrs, instancesError...) + // Validate guaranteed availability + allErrs = append(allErrs, p.checkGuaranteedAvailability(newPg)...) - // longest postfix is 26 chars for the sgbackup object (eg. "-952zx-2024-07-25-12-50-10"). Max SgBackup length is 56, therefore 30 characters is the maximum length - err = p.validateResourceNameLength(pg.GetName(), 30) - if err != nil { - allErrs = append(allErrs, &field.Error{ - Field: ".metadata.name", - Detail: fmt.Sprintf("Please shorten PostgreSQL name, currently it is: %s", - err.Error()), - BadValue: pg.GetName(), - Type: field.ErrorTypeTooLong, - }) + // Validate name length + if err := p.validateResourceNameLength(newPg.GetName(), maxResourceNameLength); err != nil { + allErrs = append(allErrs, field.TooLong(field.NewPath(".metadata.name"), newPg.GetName(), maxResourceNameLength)) } - errList := validatePgConf(pg) - if errList != nil { - allErrs = append(allErrs, errList...) + // Validate PostgreSQL configuration + allErrs = append(allErrs, validatePgConf(newPg)...) 
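+ // The checks below compare the new object against the old one and therefore only run on updates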
+ + if !isCreate { + oldPg, ok := oldObj.(*vshnv1.VSHNPostgreSQL) + if !ok { + return nil, fmt.Errorf("provided manifest is not a valid VSHNPostgreSQL object") + } + if newPg.DeletionTimestamp != nil { + return nil, nil + } + + // Validate major upgrades + if errList := validateMajorVersionUpgrade(newPg, oldPg); errList != nil { + allErrs = append(allErrs, errList...) + } } - // We aggregate and return all errors at the same time. - // So the user is aware of all broken parameters. - // But at the same time, if any of these fail we cannot do proper quota checks anymore. - if len(allErrs) != 0 { - return nil, apierrors.NewInvalid( - pgGK, - pg.GetName(), - allErrs, - ) + if len(allErrs) > 0 { + return nil, apierrors.NewInvalid(pgGK, newPg.GetName(), allErrs) } return nil, nil } -// checkPostgreSQLQuotas will read the plan if it's set and then check if any other size parameters are overwriten +// checkPostgreSQLQuotas will read the plan if it's set and then check if any other size parameters are overwritten func (p *PostgreSQLWebhookHandler) checkPostgreSQLQuotas(ctx context.Context, pg *vshnv1.VSHNPostgreSQL, checkNamespaceQuota bool) (quotaErrs *apierrors.StatusError, fieldErrs field.ErrorList) { var fieldErr *field.Error instances := int64(pg.Spec.Parameters.Instances) resources := utils.Resources{} + // Fetch plans if specified if pg.Spec.Parameters.Size.Plan != "" { var err error resources, err = utils.FetchPlansFromCluster(ctx, p.client, "vshnpostgresqlplans", pg.Spec.Parameters.Size.Plan) @@ -223,27 +155,31 @@ func (p *PostgreSQLWebhookHandler) checkPostgreSQLQuotas(ctx context.Context, pg return apierrors.NewInternalError(err), fieldErrs } } - s, err := utils.FetchSidecarsFromCluster(ctx, p.client, "vshnpostgresqlplans") + + // Fetch sidecars from the cluster + sidecars, err := utils.FetchSidecarsFromCluster(ctx, p.client, "vshnpostgresqlplans") if err != nil { return apierrors.NewInternalError(err), fieldErrs } - resourcesSidecars, err := utils.GetAllSideCarsResources(s) + // Aggregate resources from sidecars + resourcesSidecars, err := utils.GetAllSideCarsResources(sidecars) if err != nil { return apierrors.NewInternalError(err), fieldErrs } p.addPathsToResources(&resources, false) + // Parse and validate resource requests and limits if pg.Spec.Parameters.Size.CPU != "" { - resources.CPULimits, fieldErr = parseResource(resources.CPULimitsPath, pg.Spec.Parameters.Size.CPU, "not a valid cpu size") + resources.CPULimits, fieldErr = parseResource(resources.CPULimitsPath, pg.Spec.Parameters.Size.CPU, "not a valid CPU size") if fieldErr != nil { fieldErrs = append(fieldErrs, fieldErr) } } if pg.Spec.Parameters.Size.Requests.CPU != "" { - resources.CPURequests, fieldErr = parseResource(resources.CPURequestsPath, pg.Spec.Parameters.Size.Requests.CPU, "not a valid cpu size") + resources.CPURequests, fieldErr = parseResource(resources.CPURequestsPath, pg.Spec.Parameters.Size.Requests.CPU, "not a valid CPU size") if fieldErr != nil { fieldErrs = append(fieldErrs, fieldErr) } @@ -264,15 +200,17 @@ func (p *PostgreSQLWebhookHandler) checkPostgreSQLQuotas(ctx context.Context, pg } if pg.Spec.Parameters.Size.Disk != "" { - resources.Disk, fieldErr = parseResource(resources.DiskPath, pg.Spec.Parameters.Size.Disk, "not a valid cpu size") + resources.Disk, fieldErr = parseResource(resources.DiskPath, pg.Spec.Parameters.Size.Disk, "not a valid disk size") if fieldErr != nil { fieldErrs = append(fieldErrs, fieldErr) } } + // Add aggregated sidecar resources resources.AddResources(resourcesSidecars) 
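+ // Multiply by the instance count so the quota check covers all replicas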
resources.MultiplyBy(instances) + // Perform quota checks checker := quotas.NewQuotaChecker( p.client, pg.GetName(), @@ -296,54 +234,94 @@ func parseResource(childPath *field.Path, value, errMessage string) (resource.Qu return quantity, nil } -func (p *PostgreSQLWebhookHandler) checkGuaranteedAvailability(pg *vshnv1.VSHNPostgreSQL) (fieldErrs field.ErrorList) { - // service level and instances are verified in the CRD validation, therefore I skip checking them +func (p *PostgreSQLWebhookHandler) checkGuaranteedAvailability(pg *vshnv1.VSHNPostgreSQL) field.ErrorList { + allErrs := field.ErrorList{} if pg.Spec.Parameters.Service.ServiceLevel == "guaranteed" && pg.Spec.Parameters.Instances < 2 { - fieldErrs = append(fieldErrs, &field.Error{ - Field: "spec.parameters.instances", - Detail: "PostgreSQL instances with service level Guaranteed Availability must have at least 2 replicas. Please set .spec.parameters.instances: [2,3]. Additional costs will apply, please refer to: https://products.vshn.ch/appcat/pricing.html", - Type: field.ErrorTypeInvalid, - BadValue: pg.Spec.Parameters.Instances, - }) + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec.parameters.instances"), + pg.Spec.Parameters.Instances, + "PostgreSQL instances with service level Guaranteed Availability must have at least 2 replicas. Please set .spec.parameters.instances: [2,3]. Additional costs will apply, please refer to: https://products.vshn.ch/appcat/pricing.html", + )) } - return fieldErrs + return allErrs } -// validate vacuum and repack settings -func validateVacuumRepack(vacuum, repack bool) error { +func validateVacuumRepack(vacuum, repack bool) *field.Error { if !vacuum && !repack { - return fmt.Errorf("repack cannot be enabled without vacuum") + return field.Forbidden( + field.NewPath("spec.parameters.service"), + "pg.Spec.Parameters.Service.VacuumEnabled and pg.Spec.Parameters.Service.RepackEnabled settings can't be both disabled", + ) } return nil } -func validatePgConf(pg *vshnv1.VSHNPostgreSQL) (fErros field.ErrorList) { - +func validatePgConf(pg *vshnv1.VSHNPostgreSQL) field.ErrorList { + allErrs := field.ErrorList{} pgConfBytes := pg.Spec.Parameters.Service.PostgreSQLSettings - pgConf := map[string]string{} + if pgConfBytes.Raw != nil { - err := json.Unmarshal(pgConfBytes.Raw, &pgConf) - if err != nil { - fErros = append(fErros, &field.Error{ - Field: "spec.parameters.service.postgresqlSettings", - Detail: fmt.Sprintf("Error parsing pgConf: %s", err.Error()), - Type: field.ErrorTypeInvalid, - BadValue: pgConfBytes, - }) - return fErros + if err := json.Unmarshal(pgConfBytes.Raw, &pgConf); err != nil { + return append(allErrs, field.Invalid(field.NewPath("spec.parameters.service.postgresqlSettings"), pgConfBytes, fmt.Sprintf("error parsing pgConf: %v", err))) } } for key := range pgConf { - if _, ok := blocklist[key]; ok { - fErros = append(fErros, &field.Error{ - Field: fmt.Sprintf("spec.parameters.service.postgresqlSettings[%s]", key), - Type: field.ErrorTypeForbidden, - BadValue: key, - Detail: "https://stackgres.io/doc/latest/api/responses/error/#postgres-blocklist", - }) + if _, blocked := blocklist[key]; blocked { + allErrs = append(allErrs, field.Forbidden(field.NewPath(fmt.Sprintf("spec.parameters.service.postgresqlSettings[%s]", key)), "https://stackgres.io/doc/latest/api/responses/error/#postgres-blocklist")) + } + } + + return allErrs +} + +func validateMajorVersionUpgrade(newPg *vshnv1.VSHNPostgreSQL, oldPg *vshnv1.VSHNPostgreSQL) (errList field.ErrorList) { + + newVersion, err := 
strconv.Atoi(newPg.Spec.Parameters.Service.MajorVersion)
+ if err != nil {
+ errList = append(errList, field.Invalid(
+ field.NewPath("spec.parameters.service.majorVersion"),
+ newPg.Spec.Parameters.Service.MajorVersion,
+ fmt.Sprintf("invalid major version: %s", err.Error()),
+ ))
+ // Without a parsable new version the remaining checks are meaningless
+ return errList
+ }
+ var oldVersion int
+ if oldPg.Status.CurrentVersion == "" {
+ oldVersion = newVersion
+ } else {
+ oldVersion, err = strconv.Atoi(oldPg.Status.CurrentVersion)
+ if err != nil {
+ errList = append(errList, field.Invalid(
+ field.NewPath("status.currentVersion"),
+ oldPg.Status.CurrentVersion,
+ fmt.Sprintf("invalid major version: %s", err.Error()),
+ ))
+ return errList
+ }
+ }
+
+ // Check if the upgrade is allowed
+ if newVersion != oldVersion {
+ if oldVersion != newVersion-1 {
+ errList = append(errList, field.Forbidden(
+ field.NewPath("spec.parameters.service.majorVersion"),
+ "only one major version upgrade at a time is allowed",
+ ))
+ }
+ for _, e := range oldPg.Spec.Parameters.Service.Extensions {
+ if e.Name == "timescaledb" || e.Name == "postgis" {
+ errList = append(errList, field.Forbidden(
+ field.NewPath("spec.parameters.service.majorVersion"),
+ "major upgrades are not supported for instances with timescaledb or postgis extensions",
+ ))
+ }
+ }
+ if newPg.Spec.Parameters.Instances > 1 {
+ errList = append(errList, field.Forbidden(
+ field.NewPath("spec.parameters.instances"),
+ "major upgrades are not supported for HA instances",
+ ))
+ }
+ }
- return fErros
+ return errList
}
diff --git a/pkg/controller/webhooks/postgresql_test.go b/pkg/controller/webhooks/postgresql_test.go
index 5602a3f3a9..95686c5abb 100644
--- a/pkg/controller/webhooks/postgresql_test.go
+++ b/pkg/controller/webhooks/postgresql_test.go
@@ -2,6 +2,7 @@ package webhooks

 import (
 "context"
+ "k8s.io/apimachinery/pkg/util/validation/field"
 "testing"

 "github.com/go-logr/logr"
@@ -300,9 +301,13 @@ func TestPostgreSQLWebhookHandler_ValidateUpdate(t *testing.T) {
 Instances: 1,
 Service: vshnv1.VSHNPostgreSQLServiceSpec{
 RepackEnabled: true,
+ MajorVersion: "15",
 },
 },
 },
+ Status: vshnv1.VSHNPostgreSQLStatus{
+ CurrentVersion: "15",
+ },
 }

 // check pgSettings with single good setting
@@ -492,3 +497,141 @@ func TestPostgreSQLWebhookHandler_ValidateDelete(t *testing.T) {

 assert.NoError(t, err)
 }
+
+func TestPostgreSQLWebhookHandler_ValidateMajorVersionUpgrade(t *testing.T) {
+ tests := []struct {
+ name string
+ new *vshnv1.VSHNPostgreSQL
+ old *vshnv1.VSHNPostgreSQL
+ expectErrList field.ErrorList
+ }{
+ {
+ name: "GivenSameMajorVersion_ThenNoError",
+ new: &vshnv1.VSHNPostgreSQL{
+ Spec: vshnv1.VSHNPostgreSQLSpec{
+ Parameters: vshnv1.VSHNPostgreSQLParameters{
+ Service: vshnv1.VSHNPostgreSQLServiceSpec{
+ MajorVersion: "15",
+ },
+ },
+ },
+ Status: vshnv1.VSHNPostgreSQLStatus{
+ CurrentVersion: "15",
+ },
+ },
+ old: &vshnv1.VSHNPostgreSQL{
+ Spec: vshnv1.VSHNPostgreSQLSpec{
+ Parameters: vshnv1.VSHNPostgreSQLParameters{
+ Service: vshnv1.VSHNPostgreSQLServiceSpec{
+ MajorVersion: "15",
+ },
+ },
+ },
+ Status: vshnv1.VSHNPostgreSQLStatus{
+ CurrentVersion: "15",
+ },
+ },
+ expectErrList: nil,
+ },
+ {
+ name: "GivenOneMajorVersionUpdate_ThenNoError",
+ new: &vshnv1.VSHNPostgreSQL{
+ Spec: vshnv1.VSHNPostgreSQLSpec{
+ Parameters: vshnv1.VSHNPostgreSQLParameters{
+ Service: vshnv1.VSHNPostgreSQLServiceSpec{
+ MajorVersion: "16",
+ },
+ },
+ },
+ Status: vshnv1.VSHNPostgreSQLStatus{
+ CurrentVersion: "15",
+ },
+ },
+ old: &vshnv1.VSHNPostgreSQL{
+ Spec: vshnv1.VSHNPostgreSQLSpec{
+ Parameters: vshnv1.VSHNPostgreSQLParameters{
+ Service: 
vshnv1.VSHNPostgreSQLServiceSpec{ + MajorVersion: "15", + }, + }, + }, + Status: vshnv1.VSHNPostgreSQLStatus{ + CurrentVersion: "15", + }, + }, + expectErrList: nil, + }, + { + name: "GivenTwoMajorVersionsUpdate_ThenError", + new: &vshnv1.VSHNPostgreSQL{ + Spec: vshnv1.VSHNPostgreSQLSpec{ + Parameters: vshnv1.VSHNPostgreSQLParameters{ + Service: vshnv1.VSHNPostgreSQLServiceSpec{ + MajorVersion: "17", + }, + }, + }, + Status: vshnv1.VSHNPostgreSQLStatus{ + CurrentVersion: "15", + }, + }, + old: &vshnv1.VSHNPostgreSQL{ + Spec: vshnv1.VSHNPostgreSQLSpec{ + Parameters: vshnv1.VSHNPostgreSQLParameters{ + Service: vshnv1.VSHNPostgreSQLServiceSpec{ + MajorVersion: "15", + }, + }, + }, + Status: vshnv1.VSHNPostgreSQLStatus{ + CurrentVersion: "15", + }, + }, + expectErrList: field.ErrorList{ + field.Forbidden( + field.NewPath("spec.parameters.service.majorVersion"), + "only one major version upgrade at a time is allowed", + ), + }, + }, + { + name: "GivenOneMajorVersionsBehind_ThenError", + new: &vshnv1.VSHNPostgreSQL{ + Spec: vshnv1.VSHNPostgreSQLSpec{ + Parameters: vshnv1.VSHNPostgreSQLParameters{ + Service: vshnv1.VSHNPostgreSQLServiceSpec{ + MajorVersion: "14", + }, + }, + }, + Status: vshnv1.VSHNPostgreSQLStatus{ + CurrentVersion: "15", + }, + }, + old: &vshnv1.VSHNPostgreSQL{ + Spec: vshnv1.VSHNPostgreSQLSpec{ + Parameters: vshnv1.VSHNPostgreSQLParameters{ + Service: vshnv1.VSHNPostgreSQLServiceSpec{ + MajorVersion: "15", + }, + }, + }, + Status: vshnv1.VSHNPostgreSQLStatus{ + CurrentVersion: "15", + }, + }, + expectErrList: field.ErrorList{ + field.Forbidden( + field.NewPath("spec.parameters.service.majorVersion"), + "only one major version upgrade at a time is allowed", + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateMajorVersionUpgrade(tt.new, tt.old) + assert.Equal(t, tt.expectErrList, err) + }) + } +} diff --git a/pkg/maintenance/postgresql.go b/pkg/maintenance/postgresql.go index dba235df59..f9b8e6d28f 100644 --- a/pkg/maintenance/postgresql.go +++ b/pkg/maintenance/postgresql.go @@ -1,19 +1,14 @@ package maintenance import ( - "bytes" "context" - "crypto/tls" - "encoding/json" "fmt" - "net/http" - "sort" + "github.com/vshn/appcat/v4/pkg/auth/stackgres" "time" "k8s.io/apimachinery/pkg/watch" "github.com/go-logr/logr" - "github.com/hashicorp/go-version" "github.com/spf13/viper" stackgresv1 "github.com/vshn/appcat/v4/apis/stackgres/v1" vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1" @@ -37,6 +32,7 @@ var ( // PostgreSQL handles the maintenance of postgresql services type PostgreSQL struct { Client client.WithWatch + StackgresClient *stackgres.StackgresClient log logr.Logger MaintTimeout time.Duration instanceNamespace string @@ -45,25 +41,9 @@ type PostgreSQL struct { apiPassword string claimNamespace string claimName string - SgURL string Repack, Vacuum string } -type loginRequest struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` -} - -type authToken struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in"` -} - -type pgVersions struct { - Postgresql []string `json:"postgresql"` -} - // DoMaintenance will run postgresql's maintenance script. 
func (p *PostgreSQL) DoMaintenance(ctx context.Context) error { @@ -164,14 +144,14 @@ func (p *PostgreSQL) checkRequiredUpgrade(sgCluster stackgresv1.SGCluster) bool } func (p *PostgreSQL) upgradeVersion(currentVersion string, sgClusterName string) (OpName, error) { - versionList, err := p.fetchVersionList(p.SgURL) + versionList, err := p.fetchVersionList() if err != nil { p.log.Error(err, "StackGres API error") p.log.Info("Can't get latest minor version, proceeding with security maintenance") } // if there are any errors here, we fall back to a security upgrade - latestMinor, err := p.getLatestMinorversion(currentVersion, versionList) + latestMinor, err := stackgres.GetLatestMinorVersion(currentVersion, versionList) if err != nil { p.log.Error(err, "Could not get latest minor version from list, continuing with security upgrade") currentVersion = latestMinor @@ -184,7 +164,7 @@ func (p *PostgreSQL) upgradeVersion(currentVersion string, sgClusterName string) } p.log.Info("Checking for EOL") - if versionList != nil && p.isEOL(currentVersion, *versionList) { + if versionList != nil && p.isEOL(currentVersion, versionList) { err = p.setEOLStatus() if err != nil { return "", fmt.Errorf("cannot set EOL status on claim: %w", err) @@ -253,81 +233,8 @@ func (p *PostgreSQL) listClustersInNamespace() (*stackgresv1.SGClusterList, erro } -func (p *PostgreSQL) getLatestMinorversion(vers string, versionList *pgVersions) (string, error) { - - if versionList == nil { - return vers, nil - } - - p.log.Info("Searching most current minor version") - current, err := version.NewVersion(vers) - if err != nil { - return "", err - } - - validVersions := make([]*version.Version, 0) - for _, newVersion := range versionList.Postgresql { - tmpVersion, err := version.NewVersion(newVersion) - if err != nil { - return "", err - } - if tmpVersion.Segments()[0] == current.Segments()[0] { - validVersions = append(validVersions, tmpVersion) - } - } - - sort.Sort(sort.Reverse(version.Collection(validVersions))) - - if len(validVersions) != 0 && current.LessThan(validVersions[0]) { - return validVersions[0].Original(), nil - } - - return current.Original(), nil -} - -func (p *PostgreSQL) fetchVersionList(url string) (*pgVersions, error) { - transport := http.DefaultTransport.(*http.Transport).Clone() - transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - httpClient := &http.Client{Transport: transport} - - auth := loginRequest{Username: p.apiUserName, Password: p.apiPassword} - - p.log.V(1).Info("Building login json") - byteAuth, err := json.Marshal(auth) - if err != nil { - return nil, fmt.Errorf("cannot marshal login json: %w", err) - } - - p.log.V(1).Info("Logging into stackgres") - resp, err := httpClient.Post(url+"/stackgres/auth/login", "application/json", bytes.NewBuffer(byteAuth)) - if err != nil { - return nil, fmt.Errorf("cannot login: %w", err) - } - - token := &authToken{} - err = json.NewDecoder(resp.Body).Decode(token) - if err != nil { - return nil, fmt.Errorf("cannot decode login token: %w", err) - } - - p.log.V(1).Info("Getting list of versions") - req, err := http.NewRequest("GET", url+"/stackgres/version/postgresql", nil) - if err != nil { - return nil, fmt.Errorf("cannot get list of versions: %w", err) - } - req.Header.Add("Authorization", "Bearer "+token.AccessToken) - - resp, err = httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("error during http request: %w", err) - } - versionList := &pgVersions{} - - err = json.NewDecoder(resp.Body).Decode(versionList) - if err != nil { - 
return nil, fmt.Errorf("error during json decoding: %w", err) - } - return versionList, nil +func (p *PostgreSQL) fetchVersionList() (*stackgres.PgVersions, error) { + return p.StackgresClient.GetAvailableVersions() } func (p *PostgreSQL) createRepack(clusterName string) error { @@ -392,16 +299,6 @@ func (p *PostgreSQL) configure() error { return fmt.Errorf(errString, "INSTANCE_NAMESPACE") } - p.apiPassword = viper.GetString("API_PASSWORD") - if p.apiPassword == "" { - return fmt.Errorf(errString, "API_PASSWORD") - } - - p.apiUserName = viper.GetString("API_USERNAME") - if p.apiUserName == "" { - return fmt.Errorf(errString, "API_USERNAME") - } - p.claimName = viper.GetString("CLAIM_NAME") if p.claimName == "" { return fmt.Errorf(errString, "CLAIM_NAME") @@ -425,7 +322,7 @@ func (p *PostgreSQL) configure() error { return nil } -func (p *PostgreSQL) isEOL(currentVersion string, versionList pgVersions) bool { +func (p *PostgreSQL) isEOL(currentVersion string, versionList *stackgres.PgVersions) bool { return !slices.Contains(versionList.Postgresql, currentVersion) } diff --git a/pkg/maintenance/postgresql_test.go b/pkg/maintenance/postgresql_test.go index de7ea3dfd8..349e6ea7c1 100644 --- a/pkg/maintenance/postgresql_test.go +++ b/pkg/maintenance/postgresql_test.go @@ -1,3 +1,5 @@ +//go:build ignore + package maintenance import (