From 05d9c031e3d7d1ac30042b71aa769a77c60b072c Mon Sep 17 00:00:00 2001
From: Josh Carp
Date: Fri, 17 Jan 2025 11:23:54 -0500
Subject: [PATCH] Check in initial e2e tests for cloud operator.

Add an e2e test that installs the operator, then a crdbcluster, and
makes assertions about basic crdb functionality.
---
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 .../templates/cockroachdb-operator-certs.yaml |   2 +-
 .../e2e/install/cockroachdb_helm_e2e_test.go  | 138 ++++++++++-
 tests/k3d/dev-cluster.sh                      | 222 +++++++++---------
 tests/testutil/require.go                     | 125 +++++++++-
 6 files changed, 358 insertions(+), 135 deletions(-)

diff --git a/go.mod b/go.mod
index 0329b606..0daca166 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
     github.com/cenkalti/backoff v2.2.1+incompatible
     github.com/cockroachdb/cockroach-operator v0.0.0-20230531051823-2cb3e2e676f4
     github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible
-    github.com/gruntwork-io/terratest v0.41.19
+    github.com/gruntwork-io/terratest v0.41.26
     github.com/mitchellh/hashstructure/v2 v2.0.2
     github.com/pkg/errors v0.9.1
     github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.51.2
diff --git a/go.sum b/go.sum
index 9e61c58b..7896e1e8 100644
--- a/go.sum
+++ b/go.sum
@@ -342,8 +342,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro=
 github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=
-github.com/gruntwork-io/terratest v0.41.19 h1:SCqYF28nHZuBlX+jZ+QI4bK+OLpPR8BgcxVUQxyQSEw=
-github.com/gruntwork-io/terratest v0.41.19/go.mod h1:O6gajNBjO1wvc7Wl9WtbO+ORcdnhAV2GQiBE71ycwIk=
+github.com/gruntwork-io/terratest v0.41.26 h1:ttDXBBDBAYV4KgP1itGQ5O61F6KwgMMUFHy64bzvuYU=
+github.com/gruntwork-io/terratest v0.41.26/go.mod h1:O6gajNBjO1wvc7Wl9WtbO+ORcdnhAV2GQiBE71ycwIk=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
diff --git a/operator/templates/cockroachdb-operator-certs.yaml b/operator/templates/cockroachdb-operator-certs.yaml
index 812235ae..ba346aa5 100644
--- a/operator/templates/cockroachdb-operator-certs.yaml
+++ b/operator/templates/cockroachdb-operator-certs.yaml
@@ -8,4 +8,4 @@ type: Opaque
 data:
   {{ index $operatorCerts 0 }}
   {{ index $operatorCerts 1 }}
-  {{ index $operatorCerts 2 }}
\ No newline at end of file
+  {{ index $operatorCerts 2 }}
diff --git a/tests/e2e/install/cockroachdb_helm_e2e_test.go b/tests/e2e/install/cockroachdb_helm_e2e_test.go
index aa2ce929..edb131b7 100644
--- a/tests/e2e/install/cockroachdb_helm_e2e_test.go
+++ b/tests/e2e/install/cockroachdb_helm_e2e_test.go
@@ -2,10 +2,9 @@ package integration
 
 import (
     "context"
+    "encoding/json"
     "fmt"
     "io/fs"
-    v1 "k8s.io/api/rbac/v1"
-    "k8s.io/apimachinery/pkg/types"
     "log"
     "os"
     "path/filepath"
@@ -22,23 +21,136 @@ import (
     "github.com/gruntwork-io/terratest/modules/helm"
     "github.com/gruntwork-io/terratest/modules/k8s"
     "github.com/gruntwork-io/terratest/modules/random"
+    "github.com/gruntwork-io/terratest/modules/retry"
     "github.com/gruntwork-io/terratest/modules/shell"
     "github.com/stretchr/testify/require"
+    v1 "k8s.io/api/rbac/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/types" _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( - cfg = ctrl.GetConfigOrDie() - k8sClient, _ = client.New(cfg, client.Options{}) - releaseName = "crdb-test" - customCASecret = "custom-ca-secret" - helmChartPath, _ = filepath.Abs("../../../cockroachdb") + cfg = ctrl.GetConfigOrDie() + k8sClient, _ = client.New(cfg, client.Options{}) + releaseName = "crdb-test" + operatorReleaseName = "crdb-operator-test" + customCASecret = "custom-ca-secret" + helmChartPath, _ = filepath.Abs("../../../cockroachdb") + operatorChartPath, _ = filepath.Abs("../../../operator") + skipCleanup = os.Getenv("SKIP_CLEANUP") != "" ) const role = "crdb-test-cockroachdb-node-reader" +func mustMarshalJson(value interface{}) string { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + return string(out) +} + +func TestCockroachDBOperator(t *testing.T) { + namespaceName := fmt.Sprintf("cockroach-%s", strings.ToLower(t.Name())) + kubectlOptions := k8s.NewKubectlOptions("", "", namespaceName) + + k8s.CreateNamespace(t, kubectlOptions, namespaceName) + if !skipCleanup { + defer k8s.DeleteNamespace(t, kubectlOptions, namespaceName) + } + + const testDBName = "testdb" + + extraArgs := map[string][]string{ + "install": { + "--wait", + "--debug", + }, + } + + crdbCluster := testutil.CockroachCluster{ + Cfg: cfg, + K8sClient: k8sClient, + StatefulSetName: fmt.Sprintf("%s-cockroachdb", releaseName), + Namespace: namespaceName, + ClientSecret: fmt.Sprintf("%s-cockroachdb-client-secret", releaseName), + NodeSecret: fmt.Sprintf("%s-cockroachdb-node-secret", releaseName), + CaSecret: fmt.Sprintf("%s-cockroachdb-ca-secret", releaseName), + IsCaUserProvided: false, + DesiredNodes: 1, + } + + // Deploy operator + operatorOpts := &helm.Options{ + KubectlOptions: kubectlOptions, + ExtraArgs: extraArgs, + } + helm.Install(t, operatorOpts, operatorChartPath, operatorReleaseName) + if !skipCleanup { + defer cleanupResources( + t, + operatorReleaseName, + kubectlOptions, + operatorOpts, + []string{}, + ) + } + + // Wait for crd to be installed + k8s.WaitUntilServiceAvailable(t, kubectlOptions, "cockroach-operator", 30, 2*time.Second) + retry.DoWithRetryE(t, "wait-for-crd", 60, time.Second*5, func() (string, error) { + return k8s.RunKubectlAndGetOutputE(t, operatorOpts.KubectlOptions, "get", "crd", "crdbclusters.crdb.cockroachlabs.com") + }) + + // Deploy crdb + crdbOpts := &helm.Options{ + KubectlOptions: kubectlOptions, + SetValues: patchHelmValues(map[string]string{ + "operator.enabled": "true", + "operator.dataStore.volumeClaimTemplate.spec.resources.requests.storage": "1Gi", + }), + SetJsonValues: map[string]string{ + "operator.regions": mustMarshalJson([]map[string]interface{}{ + { + "code": "us-east-1", + "cloudProvider": "k3d", + "nodes": crdbCluster.DesiredNodes, + "namespace": namespaceName, + }, + }), + }, + ExtraArgs: extraArgs, + } + helm.Install(t, crdbOpts, helmChartPath, releaseName) + if !skipCleanup { + defer cleanupResources( + t, + releaseName, + kubectlOptions, + crdbOpts, + []string{}, + ) + } + + serviceName := fmt.Sprintf("%s-cockroachdb-public", releaseName) + k8s.WaitUntilServiceAvailable(t, kubectlOptions, serviceName, 30, 2*time.Second) + + testutil.RequireCertificatesToBeValid(t, crdbCluster) + testutil.RequireCRDBClusterToBeReadyTimeout(t, kubectlOptions, crdbCluster, 600*time.Second) + + pods := k8s.ListPods(t, kubectlOptions, metav1.ListOptions{ + LabelSelector: 
"app=cockroachdb", + }) + require.True(t, len(pods) > 0) + podName := fmt.Sprintf("%s.%s-cockroachdb", pods[0].Name, releaseName) + + testutil.RequireCRDBClusterToFunction(t, crdbCluster, false, podName) + testutil.RequireCRDBDatabaseToFunction(t, crdbCluster, testDBName, podName) +} + func TestCockroachDbHelmInstall(t *testing.T) { namespaceName := "cockroach" + strings.ToLower(random.UniqueId()) kubectlOptions := k8s.NewKubectlOptions("", "", namespaceName) @@ -216,8 +328,10 @@ func TestCockroachDbHelmMigration(t *testing.T) { cmdCa := shell.Command{ Command: "cockroach", - Args: []string{"cert", "create-ca", fmt.Sprintf("--certs-dir=%s", certsDir), - fmt.Sprintf("--ca-key=%s/ca.key", certsDir)}, + Args: []string{ + "cert", "create-ca", fmt.Sprintf("--certs-dir=%s", certsDir), + fmt.Sprintf("--ca-key=%s/ca.key", certsDir), + }, WorkingDir: ".", Env: nil, Logger: nil, @@ -247,8 +361,10 @@ func TestCockroachDbHelmMigration(t *testing.T) { cmdClient := shell.Command{ Command: "cockroach", - Args: []string{"cert", "create-client", security.RootUser, fmt.Sprintf("--certs-dir=%s", certsDir), - fmt.Sprintf("--ca-key=%s/ca.key", certsDir)}, + Args: []string{ + "cert", "create-client", security.RootUser, fmt.Sprintf("--certs-dir=%s", certsDir), + fmt.Sprintf("--ca-key=%s/ca.key", certsDir), + }, WorkingDir: ".", Env: nil, Logger: nil, diff --git a/tests/k3d/dev-cluster.sh b/tests/k3d/dev-cluster.sh index a3fc751f..61f8a666 100755 --- a/tests/k3d/dev-cluster.sh +++ b/tests/k3d/dev-cluster.sh @@ -22,16 +22,16 @@ REPOSITORY="cockroachlabs-helm-charts/cockroach-self-signer-cert" # Required container images for the cluster. # These images are imported into the cluster during creation. REQUIRED_IMAGES=( - "quay.io/jetstack/cert-manager-cainjector:v1.11.0" - "quay.io/jetstack/cert-manager-webhook:v1.11.0" - "quay.io/jetstack/cert-manager-controller:v1.11.0" - "quay.io/jetstack/cert-manager-ctl:v1.11.0" - "cockroachdb/cockroach:v25.1.0" - "${REGISTRY}/${REPOSITORY}:$(bin/yq '.tls.selfSigner.image.tag' ./cockroachdb/values.yaml)" + "quay.io/jetstack/cert-manager-cainjector:v1.11.0" + "quay.io/jetstack/cert-manager-webhook:v1.11.0" + "quay.io/jetstack/cert-manager-controller:v1.11.0" + "quay.io/jetstack/cert-manager-ctl:v1.11.0" + "cockroachdb/cockroach:v25.1.0" + "${REGISTRY}/${REPOSITORY}:$(bin/yq '.tls.selfSigner.image.tag' ./cockroachdb/values.yaml)" ) usage() { - cat << EOF + cat < [options] Commands: @@ -46,12 +46,12 @@ Options: --region Region for node labels (default: ${DEFAULT_REGION}) --zones Number of zones (default: ${DEFAULT_ZONES}) EOF - exit 1 + exit 1 } # Validate input if [ $# -eq 0 ]; then - usage + usage fi COMMAND="${1}" @@ -59,126 +59,126 @@ shift # Parse command line arguments while [ $# -gt 0 ]; do - if [[ $1 == *"--"* ]]; then - param="${1/--/}" - declare "$param"="$2" - shift 2 - else - shift - fi + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare "$param"="$2" + shift 2 + else + shift + fi done # Set defaults if not provided name=${name:-$DEFAULT_CLUSTER_NAME} network_name=${network_name:-"k3d-${name}"} -nodes=${cluster_size:-$DEFAULT_NODES} +nodes=${cluster_size:-$DEFAULT_NODES} version=${version:-$DEFAULT_K8S_VERSION} region=${region:-$DEFAULT_REGION} zones=${zones:-$DEFAULT_ZONES} create_cluster() { - local cluster_name="${name}-cluster" - - # Check if cluster already exists - if "${K3D_PATH}" cluster list --output name | grep -q "${cluster_name}"; then - echo "Cluster '${cluster_name}' already exists. Skipping creation." 
-    return 0
-  fi
-
-  echo "Creating cluster '${cluster_name}'..."
-
-  # K3d cluster configuration
-  local k3d_args=(
-    "${K3D_PATH}" cluster create "${cluster_name}"
-# --subnet ${subnet}
-    --no-lb
-# --servers-memory 2GB
-# --k3s-arg "--service-cidr=${svc}@server:0"
-# --k3s-arg "--cluster-cidr=${cluster}@server:0"
-    --k3s-arg "--disable=traefik@server:0"
-# --k3s-arg "--disable-network-policy@server:0"
-# --k3s-arg "--disable=servicelb@server:0"
-# --k3s-arg "--disable=coredns@server:0"
-# --k3s-arg "--flannel-backend=none@server:0"
-    --k3s-arg="--tls-san=host.k3d.internal@server:*"
-# --k3s-arg "--disable=kube-proxy@server:0"
-    --runtime-ulimit "nofile=65535:1048576"
-# --agents-memory 2GB
-    --network "${network_name}" # Use the same network name for pulling images from local registries
-# --registry-config "$SCRIPT_DIR/registries.yaml" # Use this flag if k3s containers need access to local registries
-# --agents ${nodes} # Number of agent nodes # Use this Add more worker nodes
-  )
-
-  if ! "${k3d_args[@]}"; then
-    echo "Error creating K3D cluster: ${cluster_name}"
-    return 1
-  fi
-
-  configure_node_labels "${cluster_name}"
-  import_container_images "${cluster_name}"
-
-  echo "Cluster '${cluster_name}' created and configured successfully"
+    local cluster_name="${name}-cluster"
+
+    # Check if cluster already exists
+    if "${K3D_PATH}" cluster list --output name | grep -q "${cluster_name}"; then
+        echo "Cluster '${cluster_name}' already exists. Skipping creation."
+        return 0
+    fi
+
+    echo "Creating cluster '${cluster_name}'..."
+
+    # K3d cluster configuration
+    local k3d_args=(
+        "${K3D_PATH}" cluster create "${cluster_name}"
+        # --subnet ${subnet}
+        --no-lb
+        # --servers-memory 2GB
+        # --k3s-arg "--service-cidr=${svc}@server:0"
+        # --k3s-arg "--cluster-cidr=${cluster}@server:0"
+        --k3s-arg "--disable=traefik@server:0"
+        # --k3s-arg "--disable-network-policy@server:0"
+        # --k3s-arg "--disable=servicelb@server:0"
+        # --k3s-arg "--disable=coredns@server:0"
+        # --k3s-arg "--flannel-backend=none@server:0"
+        --k3s-arg="--tls-san=host.k3d.internal@server:*"
+        # --k3s-arg "--disable=kube-proxy@server:0"
+        --runtime-ulimit "nofile=65535:1048576"
+        # --agents-memory 2GB
+        --network "${network_name}" # Use the same network name for pulling images from local registries
+        # --registry-config "$SCRIPT_DIR/registries.yaml" # Use this flag if k3s containers need access to local registries
+        # --agents ${nodes} # Number of agent nodes; uncomment to add more worker nodes
+    )
+
+    if ! "${k3d_args[@]}"; then
+        echo "Error creating K3D cluster: ${cluster_name}"
+        return 1
+    fi
+
+    configure_node_labels "${cluster_name}"
+    import_container_images "${cluster_name}"
+
+    echo "Cluster '${cluster_name}' created and configured successfully"
 }
 
 # Label the nodes with region and zone topology labels.
 configure_node_labels() {
-  local cluster_name="$1"
-  local zones_suffix=(a b c d e f g h i j k l m n o p q r s t u v w x y z)
-
-  # Get all nodes in the cluster
-  local nodes
-  nodes=$(kubectl --context "k3d-${cluster_name}" get nodes -o jsonpath='{.items[*].metadata.name}')
-
-  local index=0
-  for node in $nodes; do
-    local zone="${region}${zones_suffix[$((index % zones))]}"
-    kubectl --context "k3d-${cluster_name}" label node "$node" \
-      "topology.kubernetes.io/region=${region}" \
-      "topology.kubernetes.io/zone=${zone}"
-    index=$((index + 1))
-  done
+    local cluster_name="$1"
+    local zones_suffix=(a b c d e f g h i j k l m n o p q r s t u v w x y z)
+
+    # Get all nodes in the cluster
+    local nodes
+    nodes=$(kubectl --context "k3d-${cluster_name}" get nodes -o jsonpath='{.items[*].metadata.name}')
+
+    local index=0
+    for node in $nodes; do
+        local zone="${region}${zones_suffix[$((index % zones))]}"
+        kubectl --context "k3d-${cluster_name}" label node "$node" \
+            "topology.kubernetes.io/region=${region}" \
+            "topology.kubernetes.io/zone=${zone}"
+        index=$((index + 1))
+    done
 }
 
 # Pull images that don't exist locally and import them into the k3d cluster.
 import_container_images() {
-  local cluster_name="$1"
-  echo "Pulling and importing required container images..."
-  for image in "${REQUIRED_IMAGES[@]}"; do
-    if ! docker image inspect "$image" >/dev/null 2>&1; then
-      echo "Pulling image: $image"
-      docker pull "$image"
-    fi
-  done
-  "${K3D_PATH}" image import "${REQUIRED_IMAGES[@]}" -c "${cluster_name}"
+    local cluster_name="$1"
+    echo "Pulling and importing required container images..."
+    for image in "${REQUIRED_IMAGES[@]}"; do
+        if ! docker image inspect "$image" >/dev/null 2>&1; then
+            echo "Pulling image: $image"
+            docker pull "$image"
+        fi
+    done
+    "${K3D_PATH}" image import "${REQUIRED_IMAGES[@]}" -c "${cluster_name}"
 }
 
 case $COMMAND in
-  up)
-    echo "Creating cluster..."
-    if ! create_cluster; then
-      echo "Cluster creation failed"
-      exit 1
-    fi
-
-    # Wait for cluster to be ready
-    echo "Waiting for cluster to be ready..."
-    max_retries=30
-    retry_count=0
-    while ! kubectl --context "k3d-${name}-cluster" get nodes &>/dev/null; do
-      if [ $retry_count -ge $max_retries ]; then
-        echo "Timed out waiting for cluster to be ready"
-        exit 1
-      fi
-      sleep 2
-      ((retry_count++))
-    done
-    echo "Cluster is ready"
-    ;;
-  down)
-    "${K3D_PATH}" cluster delete "${name}-cluster"
-    ;;
-  *)
-    echo "Error: Unknown command: $COMMAND"
-    usage
-    ;;
-esac
\ No newline at end of file
+up)
+    echo "Creating cluster..."
+    if ! create_cluster; then
+        echo "Cluster creation failed"
+        exit 1
+    fi
+
+    # Wait for cluster to be ready
+    echo "Waiting for cluster to be ready..."
+    max_retries=30
+    retry_count=0
+    while ! kubectl --context "k3d-${name}-cluster" get nodes &>/dev/null; do
+        if [ $retry_count -ge $max_retries ]; then
+            echo "Timed out waiting for cluster to be ready"
+            exit 1
+        fi
+        sleep 2
+        ((retry_count++))
+    done
+    echo "Cluster is ready"
+    ;;
+down)
+    "${K3D_PATH}" cluster delete "${name}-cluster"
+    ;;
+*)
+    echo "Error: Unknown command: $COMMAND"
+    usage
+    ;;
+esac
diff --git a/tests/testutil/require.go b/tests/testutil/require.go
index 46bbe0b1..875dab43 100644
--- a/tests/testutil/require.go
+++ b/tests/testutil/require.go
@@ -35,13 +35,12 @@ type CockroachCluster struct {
     ClientSecret, NodeSecret string
     CaSecret                 string
     IsCaUserProvided         bool
+    DesiredNodes             int
 }
 
 // RequireClusterToBeReadyEventuallyTimeout waits for all the CRDB pods to come into running state.
 func RequireClusterToBeReadyEventuallyTimeout(t *testing.T, crdbCluster CockroachCluster, timeout time.Duration) {
-
     err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
-
         ss, err := fetchStatefulSet(crdbCluster.K8sClient, crdbCluster.StatefulSetName, crdbCluster.Namespace)
         if err != nil {
             t.Logf("error fetching stateful set")
@@ -63,6 +62,29 @@ func RequireClusterToBeReadyEventuallyTimeout(t *testing.T, crdbCluster Cockroac
     require.NoError(t, err)
 }
 
+func RequireCRDBClusterToBeReadyTimeout(t *testing.T, opts *k8s.KubectlOptions, crdbCluster CockroachCluster, timeout time.Duration) {
+    err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
+        pods, err := k8s.ListPodsE(t, opts, metav1.ListOptions{
+            LabelSelector: "app=cockroachdb",
+        })
+        if err != nil {
+            return false, err
+        }
+        if len(pods) != crdbCluster.DesiredNodes {
+            t.Logf("expected %d crdb pods; found %d", crdbCluster.DesiredNodes, len(pods))
+            return false, nil
+        }
+        for _, pod := range pods {
+            if !k8s.IsPodAvailable(&pod) {
+                t.Logf("pod %s not ready", pod.Name)
+                return false, nil
+            }
+        }
+        return true, nil
+    })
+    require.NoError(t, err)
+}
+
 func logPods(ctx context.Context, sts *appsv1.StatefulSet, cfg *rest.Config, t *testing.T) {
     // create a new clientset to talk to k8s
     clientset, err := kubernetes.NewForConfig(cfg)
@@ -73,7 +95,7 @@ func logPods(ctx context.Context, sts *appsv1.StatefulSet, cfg *rest.Config, t *
     // the LableSelector I thought worked did not
     // so I just get all of the Pods in a NS
     options := metav1.ListOptions{
-        //LabelSelector: "app=" + cluster.StatefulSetName(),
+        // LabelSelector: "app=" + cluster.StatefulSetName(),
     }
 
     // Get all pods
@@ -116,9 +138,14 @@ func statefulSetIsReady(ss *appsv1.StatefulSet) bool {
     return ss.Status.ReadyReplicas == ss.Status.Replicas
 }
 
-func getDBConn(t *testing.T, crdbCluster CockroachCluster, dbName string) *sql.DB {
+func getDBConn(t *testing.T, crdbCluster CockroachCluster, dbName string, podName string) *sql.DB {
     isSecure := crdbCluster.CaSecret != ""
     sqlPort := int32(26257)
+
+    serviceName := podName
+    if serviceName == "" {
+        serviceName = fmt.Sprintf("%s-0.%s", crdbCluster.StatefulSetName, crdbCluster.StatefulSetName)
+    }
     conn := &database.DBConnection{
         Ctx:        context.TODO(),
         Client:     crdbCluster.K8sClient,
@@ -126,7 +153,7 @@ func getDBConn(t *testing.T, crdbCluster CockroachCluster, dbName string) *sql.D
         UseSSL:     isSecure,
 
         RestConfig:   crdbCluster.Cfg,
-        ServiceName:  fmt.Sprintf("%s-0.%s", crdbCluster.StatefulSetName, crdbCluster.StatefulSetName),
+        ServiceName:  serviceName,
         Namespace:    crdbCluster.Namespace,
         DatabaseName: dbName,
 
@@ -146,7 +173,26 @@ func getDBConn(t *testing.T, crdbCluster CockroachCluster, dbName string) *sql.D
 
 // RequireDatabaseToFunction creates a table and insert two rows.
 func RequireDatabaseToFunction(t *testing.T, crdbCluster CockroachCluster, dbName string) {
-    db := getDBConn(t, crdbCluster, dbName)
+    db := getDBConn(t, crdbCluster, dbName, "")
     if _, err := db.Exec("CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)"); err != nil {
         t.Fatal(err)
     }
 
     // Insert two rows into the "accounts" table.
+    if _, err := db.Exec(
+        "INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)"); err != nil {
+        t.Fatal(err)
+    }
+}
+
+// RequireCRDBDatabaseToFunction creates dbName and verifies that a table can be created and written to via podName.
+func RequireCRDBDatabaseToFunction(t *testing.T, crdbCluster CockroachCluster, dbName string, podName string) {
+    systemDB := getDBConn(t, crdbCluster, "system", podName)
+    if _, err := systemDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", dbName)); err != nil {
+        t.Fatal(err)
+    }
+
+    db := getDBConn(t, crdbCluster, dbName, podName)
     if _, err := db.Exec("CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)"); err != nil {
         t.Fatal(err)
     }
@@ -161,7 +207,7 @@ func RequireDatabaseToFunction(t *testing.T, crdbCluster CockroachCluster, dbNam
 
 // RequireCRDBToFunction creates a database, a table and insert two rows if it is a fresh install of the cluster.
 // If certificate is rotated and cluster rolling restart has happened, this will check that existing two rows are present.
 func RequireCRDBToFunction(t *testing.T, crdbCluster CockroachCluster, rotate bool) {
-    db := getDBConn(t, crdbCluster, "system")
+    db := getDBConn(t, crdbCluster, "system", "")
 
     if rotate {
         t.Log("Verifying the existing data in the database after certificate rotation")
@@ -169,7 +215,67 @@ func RequireCRDBToFunction(t *testing.T, crdbCluster CockroachCluster, rotate bo
 
     // Create database only if we are testing crdb install
     if !rotate {
-        if _, err := db.Exec("CREATE DATABASE test_db"); err != nil {
+        if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS test_db"); err != nil {
             t.Fatal(err)
         }
     }
+
+    if _, err := db.Exec("USE test_db"); err != nil {
+        t.Fatal(err)
+    }
+
+    // Create and insert into table only for the crdb install
+    if !rotate {
+        // Create the "accounts" table.
+        if _, err := db.Exec("CREATE TABLE IF NOT EXISTS accounts (id INT PRIMARY KEY, balance INT)"); err != nil {
+            t.Fatal(err)
+        }
+
+        // Insert two rows into the "accounts" table.
+        if _, err := db.Exec(
+            "INSERT INTO accounts (id, balance) VALUES (1, 1000), (2, 250)"); err != nil {
+            t.Fatal(err)
+        }
+    }
+
+    // Print out the balances.
+    rows, err := db.Query("SELECT id, balance FROM accounts")
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer rows.Close()
+    t.Log("Initial balances:")
+    for rows.Next() {
+        var id, balance int
+        if err := rows.Scan(&id, &balance); err != nil {
+            t.Fatal(err)
+        }
+        t.Log("balances", id, balance)
+    }
+
+    countRows, err := db.Query("SELECT COUNT(*) as count FROM accounts")
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer countRows.Close()
+    count := getCount(t, countRows)
+    if count != 2 {
+        t.Fatal(fmt.Errorf("found incorrect number of rows. Expected 2 got %v", count))
+    }
+
+    t.Log("finished testing database")
+}
+
+func RequireCRDBClusterToFunction(t *testing.T, crdbCluster CockroachCluster, rotate bool, podName string) {
+    db := getDBConn(t, crdbCluster, "system", podName)
+
+    if rotate {
+        t.Log("Verifying the existing data in the database after certificate rotation")
+    }
+
+    // Create database only if we are testing crdb install
+    if !rotate {
+        if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS test_db"); err != nil {
+            t.Fatal(err)
+        }
+    }
+}
@@ -314,7 +420,8 @@ func PrintDebugLogs(t *testing.T, options *k8s.KubectlOptions) {
 
 // RequireToRunRotateJob triggers the client/node or CA certificate rotation job based on next cron schedule.
 func RequireToRunRotateJob(t *testing.T, crdbCluster CockroachCluster, values map[string]string,
-    scheduleToTriggerRotation string, caRotate bool) {
+    scheduleToTriggerRotation string, caRotate bool,
+) {
     var args []string
     var jobName string
     imageName := fmt.Sprintf("gcr.io/cockroachlabs-helm-charts/cockroach-self-signer-cert:%s", values["tls.selfSigner.image.tag"])
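
---
Running locally: assuming Docker, k3d, and kubectl are available on the host,
something like the following should exercise the new test end to end (the
timeout and flags here are illustrative, not prescribed by this change):

    tests/k3d/dev-cluster.sh up
    go test -v -timeout 30m -run TestCockroachDBOperator ./tests/e2e/install/
    # Set SKIP_CLEANUP to keep the namespace and releases around for debugging:
    SKIP_CLEANUP=1 go test -v -timeout 30m -run TestCockroachDBOperator ./tests/e2e/install/
    tests/k3d/dev-cluster.sh down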