From 0d8d9a806a98f721d6c1cd7788be06b14ebe43a4 Mon Sep 17 00:00:00 2001 From: see-quick Date: Fri, 27 Sep 2024 22:36:24 +0200 Subject: [PATCH 01/12] [system test] [doc] kafka package Signed-off-by: see-quick --- development-docs/systemtests/labels/kafka.md | 7 + .../systemtest/docs/TestDocsLabels.java | 2 + .../systemtest/kafka/ConfigProviderST.java | 32 ++ .../systemtest/kafka/KafkaNodePoolST.java | 136 +++--- .../io/strimzi/systemtest/kafka/KafkaST.java | 336 +++++++------- .../systemtest/kafka/KafkaVersionsST.java | 28 ++ .../io/strimzi/systemtest/kafka/QuotasST.java | 49 ++- .../systemtest/kafka/TieredStorageST.java | 46 +- .../dynamicconfiguration/DynamicConfST.java | 64 ++- .../DynamicConfSharedST.java | 22 +- .../kafka/listeners/ListenersST.java | 412 +++++++++++++++++- .../kafka/listeners/MultipleListenersST.java | 94 ++++ 12 files changed, 955 insertions(+), 273 deletions(-) create mode 100644 development-docs/systemtests/labels/kafka.md diff --git a/development-docs/systemtests/labels/kafka.md b/development-docs/systemtests/labels/kafka.md new file mode 100644 index 00000000000..30151222386 --- /dev/null +++ b/development-docs/systemtests/labels/kafka.md @@ -0,0 +1,7 @@ +# **Kafka** + +## Description + +These tests validate the core Apache Kafka functionality within the Strimzi ecosystem, ensuring the reliability, scalability, and correctness of Kafka clusters. +They cover various aspects such as dynamic configuration updates, listener configurations, node pool management, version upgrades, quotas, and tiered storage. +These tests are crucial to ensure that Kafka clusters can handle production workloads. \ No newline at end of file diff --git a/systemtest/src/main/java/io/strimzi/systemtest/docs/TestDocsLabels.java b/systemtest/src/main/java/io/strimzi/systemtest/docs/TestDocsLabels.java index e823ef3f603..5a7aca67e09 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/docs/TestDocsLabels.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/docs/TestDocsLabels.java @@ -10,7 +10,9 @@ */ public interface TestDocsLabels { + String KAFKA = "kafka"; String BRIDGE = "bridge"; String CONNECT = "connect"; String CRUISE_CONTROL = "cruise-control"; + String DYNAMIC_CONFIGURATION = "dynamic-configuration"; } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index 1e15bc2b51b..03589a664ee 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -11,11 +11,17 @@ import io.fabric8.kubernetes.api.model.rbac.RoleBuilder; import io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder; import io.fabric8.kubernetes.api.model.rbac.SubjectBuilder; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.connect.KafkaConnect; import io.strimzi.operator.common.Annotations; import io.strimzi.operator.common.model.Labels; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; @@ -38,11 +44,37 @@ import static 
io.strimzi.test.k8s.KubeClusterResource.kubeClient; @Tag(REGRESSION) +@SuiteDoc( + description = @Desc("Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration."), + beforeTestSteps = { + @Step(value = "Deploy uber operator across all namespaces, with custom configuration", expected = "Uber operator is deployed") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class ConfigProviderST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(ConfigProviderST.class); @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration."), + steps = { + @Step(value = "Initialize test storage and define custom file sink path", expected = "Test storage is initialized and file sink path is set"), + @Step(value = "Create broker and controller pools", expected = "Resources are created and are in ready state"), + @Step(value = "Create Kafka cluster", expected = "Kafka cluster is ready with 3 brokers"), + @Step(value = "Create ConfigMap for connector configuration", expected = "ConfigMap with connector configuration is created"), + @Step(value = "Deploy Kafka Connect with external configuration", expected = "Kafka Connect is deployed with proper configuration"), + @Step(value = "Create necessary Role and RoleBinding for connector", expected = "Role and RoleBinding are created and applied"), + @Step(value = "Deploy Kafka connector", expected = "Kafka connector is successfully deployed"), + @Step(value = "Deploy Kafka clients", expected = "Kafka clients are deployed and ready"), + @Step(value = "Send messages and verify they are written to file sink", expected = "Messages are successfully written to the specified file sink") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testConnectWithConnectorUsingConfigAndEnvProvider() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String customFileSinkPath = "/tmp/my-own-path.txt"; diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 62db2d75ece..4aa75a6a005 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -6,6 +6,11 @@ import io.fabric8.kubernetes.api.model.LabelSelector; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; @@ -15,6 +20,7 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; @@ -49,6 +55,16 @@ @Tag(REGRESSION) +@SuiteDoc( + description = @Desc("This test suite verifies various functionalities of Kafka Node Pools in a Kafka cluster."), + beforeTestSteps = { + @Step(value = "Ensure the environment is not using OLM or Helm and Kafka Node Pools are enabled", expected = 
"Environment is validated"), + @Step(value = "Install the default cluster operator", expected = "Cluster operator is installed") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class KafkaNodePoolST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(KafkaNodePoolST.class); @@ -70,6 +86,18 @@ public class KafkaNodePoolST extends AbstractST { * - broker-id-management */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies the management of broker IDs in Kafka Node Pools using annotations."), + steps = { + @Step(value = "Deploy a Kafka instance with annotations to manage Node Pools and Initial NodePool (Initial) to hold Topics and act as controller.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91."), + @Step(value = "Deploy additional 2 NodePools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]).", expected = "NodePools are deployed, NodePool A contains ID 4, NodePool B contains IDs 6, 0."), + @Step(value = "Annotate NodePool A 'next-node-ids' and NodePool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp.", expected = "NodePools are scaled, NodePool A contains IDs 4, 20, 21, 1. NodePool B contains ID 0."), + @Step(value = "Annotate NodePool A 'remove-node-ids' and NodePool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp.", expected = "NodePools are scaled, NodePool A contains IDs 1, 4. NodePool B contains IDs 2, 3, 5.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String nodePoolNameA = testStorage.getBrokerPoolName() + "-a"; @@ -161,27 +189,23 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { KafkaNodePoolUtils.getCurrentKafkaNodePoolIds(testStorage.getNamespaceName(), nodePoolNameB).equals(Arrays.asList(0, 2, 3, 5))); } - /** - * @description This test case verifies changing of roles in Kafka Node Pools. - * - * @steps - * 1. - Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools, both with mixed role, first one stable, second one which will be modified. - * 2. - Create KafkaTopic with replica number requiring all Kafka Brokers to be present. - * 3. - Annotate one of Node Pools to perform manual Rolling Update. - * - Rolling Update started. - * 3. - Change role of Kafka Node Pool from mixed to controller only role. - * - Role Change is being prevented because a previously created KafkaTopic still has some replicas present on the node to be scaled down, also there is original Rolling Update going on. - * 4. - Original Rolling Update finishes successfully. - * 5. - Delete previously created KafkaTopic. - * - KafkaTopic is deleted, and roll of Node Pool whose role was changed begins resulting in new nodes with expected role. - * 6. - Change role of Kafka Node Pool from controller only to mixed role. - * - Kafka Node Pool changes role to mixed role. - * 7. - Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present. 
- * - * @usecase - * - kafka-node-pool - */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies changing of roles in Kafka Node Pools."), + steps = { + @Step(value = "Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial Node Pools is deployed."), + @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic is created."), + @Step(value = "Annotate one of Node Pools to perform manual Rolling Update.", expected = "Rolling Update started."), + @Step(value = "Change role of Kafka Node Pool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update."), + @Step(value = "Original Rolling Update finishes successfully.", expected = "Rolling Update is completed."), + @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and Node Pool role change is initiated."), + @Step(value = "Change role of Kafka Node Pool from controller only to mixed role.", expected = "Kafka Node Pool changes role to mixed role."), + @Step(value = "Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present.", expected = "Messages are produced and consumed successfully.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testNodePoolsRolesChanging() { assumeTrue(Environment.isKRaftModeEnabled()); final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -246,27 +270,21 @@ void testNodePoolsRolesChanging() { transmitMessagesWithNewTopicAndClean(testStorage, 5); } - /** - * @description This test case verifies possibility of adding and removing Kafka Node Pools into existing Kafka cluster. - * - * @steps - * 1. - Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools, one being controller if possible other initial broker. - * - Kafka instance is deployed according to Kafka and KafkaNodePool custom resource. - * 2. - Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. - * - transition of messages is finished successfully, KafkaTopic created and cleaned as expected. - * 3. - Add extra KafkaNodePool with broker role to the Kafka. - * - KafkaNodePool is deployed and ready. - * 4. - Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. - * - transition of messages is finished successfully, KafkaTopic created and cleaned as expected. - * 5. - Remove one of kafkaNodePool with broker role. - * - KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready. - * 6. - Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. - * - transition of messages is finished successfully, KafkaTopic created and cleaned as expected. 
- * - * @usecase - * - kafka-node-pool - */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies the possibility of adding and removing Kafka Node Pools into an existing Kafka cluster."), + steps = { + @Step(value = "Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource."), + @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."), + @Step(value = "Add extra KafkaNodePool with broker role to the Kafka.", expected = "KafkaNodePool is deployed and ready."), + @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."), + @Step(value = "Remove one of kafkaNodePool with broker role.", expected = "KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready."), + @Step(value = "Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testNodePoolsAdditionAndRemoval() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); // node pools name convention is 'A' for all roles (: if possible i.e. based on feature gate) 'B' for broker roles. @@ -314,30 +332,22 @@ void testNodePoolsAdditionAndRemoval() { transmitMessagesWithNewTopicAndClean(testStorage, 2); } - /** - * @description This test case verifies transfer of Kafka Cluster from and to management by KafkaNodePool, by creating corresponding Kafka and KafkaNodePool custom resources - * and manipulating according kafka annotation. - * - * @steps - * 1. - Deploy Kafka with annotated to enable management by KafkaNodePool, and KafkaNodePool targeting given Kafka Cluster. - * - Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected. - * 2. - Modify KafkaNodePool by increasing number of Kafka Replicas. - * - Number of Kafka Pods is increased to match specification from KafkaNodePool - * 3. - Produce and consume messages in given Kafka Cluster. - * - Clients can produce and consume messages. - * 4. - Modify Kafka custom resource annotation strimzi.io/node-pool to disable management by KafkaNodePool. - * - StrimziPodSet is modified, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) are ignored. - * 5. - Produce and consume messages in given Kafka Cluster. - * - Clients can produce and consume messages. - * 6. - Modify Kafka custom resource annotation strimzi.io/node-pool to enable management by KafkaNodePool. - * - new StrimziPodSet is created, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) has priority over Kafka specification. - * 7. - Produce and consume messages in given Kafka Cluster. - * - Clients can produce and consume messages. 
- * - * @usecase - * - kafka-node-pool - */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies transfer of Kafka Cluster from and to management by KafkaNodePool, by creating corresponding Kafka and KafkaNodePool custom resources and manipulating according Kafka annotation."), + steps = { + @Step(value = "Deploy Kafka with annotated to enable management by KafkaNodePool, and KafkaNodePool targeting given Kafka Cluster.", expected = "Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected."), + @Step(value = "Modify KafkaNodePool by increasing number of Kafka Replicas.", expected = "Number of Kafka Pods is increased to match specification from KafkaNodePool."), + @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), + @Step(value = "Modify Kafka custom resource annotation strimzi.io/node-pool to disable management by KafkaNodePool.", expected = "StrimziPodSet is modified, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) are ignored."), + @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), + @Step(value = "Modify Kafka custom resource annotation strimzi.io/node-pool to enable management by KafkaNodePool.", expected = "New StrimziPodSet is created, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) has priority over Kafka specification."), + @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKafkaManagementTransferToAndFromKafkaNodePool() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final int originalKafkaReplicaCount = 3; diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index 6a3d6ceacf9..0fd7832fbe7 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -18,6 +18,11 @@ import io.fabric8.kubernetes.api.model.Service; import io.fabric8.kubernetes.api.model.VolumeMount; import io.fabric8.kubernetes.api.model.VolumeMountBuilder; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.bridge.KafkaBridgeResources; import io.strimzi.api.kafka.model.common.JvmOptions; import io.strimzi.api.kafka.model.common.JvmOptionsBuilder; @@ -49,6 +54,7 @@ import io.strimzi.systemtest.annotations.KindNotSupported; import io.strimzi.systemtest.annotations.MultiNodeClusterOnly; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; @@ -106,30 +112,32 @@ @Tag(REGRESSION) @SuppressWarnings("checkstyle:ClassFanOutComplexity") +@SuiteDoc( + description = @Desc("Suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka 
clusters."), + beforeTestSteps = { + @Step(value = "Deploy cluster operator across all namespaces, with custom configuration", expected = "Cluster operator is deployed") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) class KafkaST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(KafkaST.class); private static final String OPENSHIFT_CLUSTER_NAME = "openshift-my-cluster"; - /** - * @description This test case verifies that Pod's resources (limits and requests), custom JVM configurations, and expected Java configuration - * are propagated correctly to Pods, containers, and processes. - * - * @steps - * 1. - Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration - * - Kafka and its components (ZooKeeper, Entity Operator) are deployed - * 2. - For each of components (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables. - * - Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification. - * 3. - Wait for a time to observe that none of initiated components needed Rolling Update. - * - All of Kafka components remained in stable state. - * - * @usecase - * - JVM - * - configuration - * - resources - * - environment variables - */ @ParallelNamespaceTest @SuppressWarnings({"checkstyle:MethodLength"}) + @TestDoc( + description = @Desc("This test case verifies that Pod's resources (limits and requests), custom JVM configurations, and expected Java configuration are propagated correctly to Pods, containers, and processes."), + steps = { + @Step(value = "Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration", expected = "Kafka and its components (ZooKeeper, Entity Operator) are deployed"), + @Step(value = "For each of components (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables", expected = "Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification."), + @Step(value = "Wait for a time to observe that none of initiated components needed Rolling Update", expected = "All of Kafka components remained in stable state.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testJvmAndResources() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -300,29 +308,20 @@ void testJvmAndResources() { DeploymentUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), eoDepName, eoPods); } - /** - * @description This test case verifies the correct deployment of Entity Operator, i.e., including both User Operator and Topic Operator. - * Entity Operator is firstly modified to exclude User Operator, afterwards it is modified to default configuration, which includes User Operator. - * The next step is removal of Topic Operator itself and finally, also removing User Operator, with Topic Operator being already removed. - * - * @steps - * 1. - Deploy Kafka with Entity Operator set. - * - Kafka is deployed, and Entity Operator consist of both Topic and User Operators - * 2. - Remove User Operator from the Kafka specification - * - User Operator container is deleted - * 3. - Set User Operator back in the Kafka specification - * - User Operator container is recreated - * 4. 
- Remove Topic Operator from the Kafka specification - * - Topic Operator container is removed Entity Operator - * 5. - Remove User Operator from the Kafka specification - * - Entity Operator Pod is removed, as there are no other containers present. - * - * @usecase - * - Entity Operator - * - Topic Operator - * - User Operator - */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies the correct deployment of Entity Operator, i.e., including both User Operator and Topic Operator. Entity Operator is firstly modified to exclude User Operator, afterwards it is modified to default configuration, which includes User Operator. The next step is removal of Topic Operator itself and finally, also removing User Operator, with Topic Operator being already removed."), + steps = { + @Step(value = "Deploy Kafka with Entity Operator set.", expected = "Kafka is deployed, and Entity Operator consist of both Topic and User Operators"), + @Step(value = "Remove User Operator from the Kafka specification", expected = "User Operator container is deleted"), + @Step(value = "Set User Operator back in the Kafka specification", expected = "User Operator container is recreated"), + @Step(value = "Remove Topic Operator from the Kafka specification", expected = "Topic Operator container is removed from Entity Operator"), + @Step(value = "Remove User Operator from the Kafka specification", expected = "Entity Operator Pod is removed, as there are no other containers present.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testRemoveComponentsFromEntityOperator() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -394,28 +393,20 @@ void testRemoveComponentsFromEntityOperator() { PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(testStorage.getClusterName()), 0); } - /** - * @description This test case verifies that Kafka with persistent storage, and JBOD storage, property 'delete claim' of JBOD storage. - * - * @steps - * 1. - Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of these are configured to delete their Persistent Volume Claims on Kafka cluster un-provision. - * - Kafka is deployed, volumes are labeled and linked to Pods correctly. - * 2. - Verify that labels in Persistent Volume Claims are set correctly. - * - Persistent Volume Claims do contain expected labels and values. - * 2. - Modify Kafka Custom Resource, specifically 'delete claim' property of its first Kafka Volume. - * - Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator. - * 3. - Delete Kafka cluster. - * - Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'delete claim' property set to true. - * 4. - Verify remaining Persistent Volume Claims. - * - Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'delete claim' set to true is still present. 
- * - * @usecase - * - JBOD - * - PVC - * - volume - * - annotations - */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies Kafka with persistent storage and JBOD storage, and the 'delete claim' property of JBOD volumes."), + steps = { + @Step(value = "Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of these are configured to delete their Persistent Volume Claims on Kafka cluster un-provision.", expected = "Kafka is deployed, volumes are labeled and linked to Pods correctly."), + @Step(value = "Verify that labels in Persistent Volume Claims are set correctly.", expected = "Persistent Volume Claims do contain expected labels and values."), + @Step(value = "Modify Kafka Custom Resource, specifically 'delete claim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of the corresponding Persistent Volume Claim is changed afterwards by Cluster Operator."), + @Step(value = "Delete Kafka cluster.", expected = "Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'delete claim' property set to true."), + @Step(value = "Verify remaining Persistent Volume Claims.", expected = "Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'delete claim' set to false is still present.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKafkaJBODDeleteClaimsTrueFalse() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final int kafkaReplicas = 2; @@ -494,6 +485,18 @@ void testKafkaJBODDeleteClaimsTrueFalse() { @ParallelNamespaceTest @Tag(LOADBALANCER_SUPPORTED) + @TestDoc( + description = @Desc("Test regenerates certificates after changing Kafka's external address."), + steps = { + @Step(value = "Create Kafka without external listener", expected = "Kafka instance is created without an external listener"), + @Step(value = "Edit Kafka to include an external listener", expected = "External listener is correctly added to the Kafka instance"), + @Step(value = "Wait until the Kafka component has rolled", expected = "Kafka component rolls successfully with the new external listener"), + @Step(value = "Compare Kafka broker secrets before and after adding external listener", expected = "Secrets are different before and after adding the external listener") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testRegenerateCertExternalAddressChange() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -544,29 +547,20 @@ void testRegenerateCertExternalAddressChange() { }); } - /** - * @description This test case verifies the presence of expected Strimzi specific labels, also labels and annotations specified by user. - * Some of user-specified labels are later modified (new one is added, one is modified) which triggers rolling update after which - * all changes took place as expected. - * - * @steps - * 1. - Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotation in PVC metadata - * - Kafka is deployed with its default labels and all others specified by user. - * 2. - Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected - * - Producer and Consumer are able to produce and consume messages respectively. - * 3.
- Modify configuration of Kafka CR with addition of new labels and modification of existing - * - Kafka is rolling and new labels are present in Kafka CR, and managed resources - * 4. - Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected - * - Producer and Consumer are able to produce and consume messages respectively. - * - * @usecase - * - annotations - * - labels - * - kafka-rolling-update - * - persistent-storage - */ @ParallelNamespaceTest @SuppressWarnings({"checkstyle:JavaNCSS", "checkstyle:NPathComplexity", "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity"}) + @TestDoc( + description = @Desc("This test case verifies the presence of expected Strimzi specific labels, also labels and annotations specified by user. Some of user-specified labels are later modified (new one is added, one is modified) which triggers rolling update after which all changes took place as expected."), + steps = { + @Step(value = "Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotation in PVC metadata", expected = "Kafka is deployed with its default labels and all others specified by user."), + @Step(value = "Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected", expected = "Producer and Consumer are able to produce and consume messages respectively."), + @Step(value = "Modify configuration of Kafka CR with addition of new labels and modification of existing", expected = "Kafka is rolling and new labels are present in Kafka CR, and managed resources"), + @Step(value = "Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected", expected = "Producer and Consumer are able to produce and consume messages respectively.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testLabelsExistenceAndManipulation() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -875,27 +869,20 @@ void testLabelsExistenceAndManipulation() { ClientUtils.waitForInstantClientSuccess(testStorage); } - /** - * @description This test case verifies correct storage of messages on disk, and their presence even after rolling update of all Kafka Pods. Test case - * also checks if offset topic related files are present. - * - * @steps - * 1. - Deploy persistent Kafka with corresponding configuration of offsets topic. - * - Kafka is created with expected configuration. - * 2. - Create KafkaTopic with corresponding configuration - * - KafkaTopic is created with expected configuration. - * 3. - Execute command to check presence of offsets topic related files. - * - Files related to Offset topic are present. - * 4. - Produce default number of messages to already created topic. - * - Produced messages are present. - * 5. - Perform rolling update on all Kafka Pods, in this case single broker. - * - After rolling update is completed all messages are again present, as they were successfully stored on disk. - * - * @usecase - * - data-storage - * - kafka-configuration - */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test case verifies correct storage of messages on disk, and their presence even after rolling update of all Kafka Pods. 
Test case also checks if offset topic related files are present."), + steps = { + @Step(value = "Deploy persistent Kafka with corresponding configuration of offsets topic.", expected = "Kafka is created with expected configuration."), + @Step(value = "Create KafkaTopic with corresponding configuration.", expected = "KafkaTopic is created with expected configuration."), + @Step(value = "Execute command to check presence of offsets topic related files.", expected = "Files related to Offset topic are present."), + @Step(value = "Produce default number of messages to already created topic.", expected = "Produced messages are present."), + @Step(value = "Perform rolling update on all Kafka Pods, in this case single broker.", expected = "After rolling update is completed all messages are again present, as they were successfully stored on disk.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMessagesAndConsumerOffsetFilesOnDisk() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -925,17 +912,17 @@ void testMessagesAndConsumerOffsetFilesOnDisk() { TestUtils.waitFor("KafkaTopic creation inside Kafka Pod", TestConstants.GLOBAL_POLL_INTERVAL, TestConstants.GLOBAL_TIMEOUT, () -> cmdKubeClient(testStorage.getNamespaceName()).execInPod(brokerPodName, "/bin/bash", - "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(testStorage.getTopicName())); + "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(testStorage.getTopicName())); String topicDirNameInPod = cmdKubeClient(testStorage.getNamespaceName()).execInPod(brokerPodName, "/bin/bash", - "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + testStorage.getTopicName() + "/p'").out(); + "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + testStorage.getTopicName() + "/p'").out(); String commandToGetDataFromTopic = - "cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log"; + "cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log"; LOGGER.info("Executing command: {} in {}", commandToGetDataFromTopic, brokerPodName); String topicData = cmdKubeClient(testStorage.getNamespaceName()).execInPod(brokerPodName, - "/bin/bash", "-c", commandToGetDataFromTopic).out(); + "/bin/bash", "-c", commandToGetDataFromTopic).out(); LOGGER.info("Topic: {} is present in Kafka Broker: {} with no data", testStorage.getTopicName(), brokerPodName); assertThat("Topic contains data", topicData, emptyOrNullString()); @@ -974,23 +961,19 @@ void testMessagesAndConsumerOffsetFilesOnDisk() { assertThat("Topic has no data", topicData, notNullValue()); } - /** - * @description This test case verifies that Kafka (with all its components, including Zookeeper, Entity Operator, KafkaExporter, CruiseControl) configured with - * 'withReadOnlyRootFilesystem' can be deployed and also works correctly. - * - * @steps - * 1. - Deploy persistent Kafka with 3 Kafka and Zookeeper replicas, Entity Operator, CruiseControl, and KafkaExporter. Each component has configuration 'withReadOnlyRootFilesystem' set to true. - * - Kafka and its components are deployed. - * 2. - Create Kafka producer and consumer. - * - Kafka clients are successfully created. - * 3. - Produce and consume messages using created clients. - * - Messages are successfully send and received. 
- * - * @usecase - * - root-file-system - */ @ParallelNamespaceTest @Tag(CRUISE_CONTROL) + @TestDoc( + description = @Desc("This test case verifies that Kafka (with all its components, including Zookeeper, Entity Operator, KafkaExporter, CruiseControl) configured with 'withReadOnlyRootFilesystem' can be deployed and also works correctly."), + steps = { + @Step(value = "Deploy persistent Kafka with 3 Kafka and Zookeeper replicas, Entity Operator, CruiseControl, and KafkaExporter. Each component has configuration 'withReadOnlyRootFilesystem' set to true.", expected = "Kafka and its components are deployed."), + @Step(value = "Create Kafka producer and consumer.", expected = "Kafka clients are successfully created."), + @Step(value = "Produce and consume messages using created clients.", expected = "Messages are successfully sent and received.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testReadOnlyRootFileSystem() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -1073,6 +1056,20 @@ void testReadOnlyRootFileSystem() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test to ensure that deploying Kafka with an unsupported version results in the expected error."), + steps = { + @Step(value = "Initialize test storage with current context", expected = "Test storage is initialized"), + @Step(value = "Create Kafka node pools", expected = "Kafka node pools are created and ready"), + @Step(value = "Deploy Kafka with a non-existing version", expected = "Kafka deployment with non-supported version begins"), + @Step(value = "Log Kafka deployment process", expected = "Log entry for Kafka deployment is created"), + @Step(value = "Wait for Kafka to not be ready", expected = "Kafka is not ready as expected"), + @Step(value = "Verify Kafka status message for unsupported version", expected = "Error message for unsupported version is found in Kafka status") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testDeployUnsupportedKafka() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); String nonExistingVersion = "6.6.6"; @@ -1086,9 +1083,9 @@ void testDeployUnsupportedKafka() { ); resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1) .editSpec() - .editKafka() - .withVersion(nonExistingVersion) - .endKafka() + .editKafka() + .withVersion(nonExistingVersion) + .endKafka() .endSpec() .build() ); @@ -1099,30 +1096,22 @@ void testDeployUnsupportedKafka() { KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(testStorage.getNamespaceName(), testStorage.getClusterName(), nonExistingVersionMessage); } - /** - * @description This test verifies the functionality of resizing JBOD storage volumes on a Kafka cluster. - * It checks that the system can handle volume size changes and performs a rolling update to apply these changes. - * - * @steps - * 1. - Deploy a Kafka cluster with JBOD storage and initial volume sizes. - * - Kafka cluster is operational. - * 2. - Produce and consume messages continuously to simulate cluster activity. - * - Message traffic is consistent. - * 3. - Increase the size of one of the JBOD volumes. - * - Volume size change is applied. - * 4. - Verify that the updated volume size is reflected. - * - PVC reflects the new size. - * 5. - Ensure continuous message production and consumption are unaffected during the update process. - * - Message flow continues without interruption. 
- * - * @usecase - * - jbod - * - volume-resize - * - persistent-volume-claims - */ @KindNotSupported // Storage Class standard does not support resizing of volumes @MultiNodeClusterOnly // in multi-node we use different Storage Class, which support re-sizing of volumes @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test verifies the functionality of resizing JBOD storage volumes on a Kafka cluster. It checks that the system can handle volume size changes and performs a rolling update to apply these changes."), + steps = { + @Step(value = "Deploy a Kafka cluster with JBOD storage and initial volume sizes.", expected = "Kafka cluster is operational."), + @Step(value = "Produce and consume messages continuously to simulate cluster activity.", expected = "Message traffic is consistent."), + @Step(value = "Increase the size of one of the JBOD volumes.", expected = "Volume size change is applied."), + @Step(value = "Verify that the updated volume size is reflected.", expected = "PVC reflects the new size."), + @Step(value = "Ensure continuous message production and consumption are unaffected during the update process.", expected = "Message flow continues without interruption.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testResizeJbodVolumes() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final int numberOfKafkaReplicas = 3; @@ -1232,22 +1221,18 @@ void testResizeJbodVolumes() { // ############################## } - /** - * @description This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft. - * - * @steps - * 1. - Deploy Kafka annotated to enable KRaft (and additionally annotated to enable management by KafkaNodePool due to default usage of NodePools), and KafkaNodePool targeting given Kafka Cluster. - * - Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected. - * 2. - Produce and consume messages in given Kafka Cluster. - * - Clients can produce and consume messages. - * 3. - Trigger manual Rolling Update. - * - Rolling update is triggered and completed shortly after. - * - * @usecase - * - kafka-node-pool - * - kraft - */ @ParallelNamespaceTest() + @TestDoc( + description = @Desc("This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft."), + steps = { + @Step(value = "Deploy Kafka annotated to enable KRaft (and additionally annotated to enable management by KafkaNodePool due to default usage of NodePools), and KafkaNodePool targeting given Kafka Cluster.", expected = "Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected."), + @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), + @Step(value = "Trigger manual Rolling Update.", expected = "Rolling update is triggered and completed shortly after.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKRaftMode() { assumeTrue(Environment.isKRaftModeEnabled() && Environment.isKafkaNodePoolsEnabled()); @@ -1287,28 +1272,21 @@ void testKRaftMode() { ClientUtils.waitForContinuousClientSuccess(testStorage); } - /** - * @description This test validates the mounting and usage of additional volumes for Kafka, Kafka Connect, and Kafka Bridge components. - * It tests whether secret and config map volumes are correctly created, mounted, and accessible across various deployments. - * - * @steps - * 1. 
- Setup environment prerequisites and configure test storage. - * - Ensure the environment is in KRaft mode. - * 2. - Create necessary Kafka resources with additional volumes for secrets and config maps. - * - Resources are correctly instantiated with specified volumes. - * 3. - Deploy Kafka, Kafka Connect, and Kafka Bridge with these volumes. - * - Components are correctly configured with additional volumes. - * 4. - Verify that all pods (Kafka, Connect, and Bridge) have additional volumes mounted and accessible. - * - Volumes are correctly mounted and usable within pods. - * - * @usecase - * - additional-volumes - * - secrets-management - * - configuration-management - */ @ParallelNamespaceTest @Tag(CONNECT) @Tag(BRIDGE) + @TestDoc( + description = @Desc("This test validates the mounting and usage of additional volumes for Kafka, Kafka Connect, and Kafka Bridge components. It tests whether secret and config map volumes are correctly created, mounted, and accessible across various deployments."), + steps = { + @Step(value = "Setup environment prerequisites and configure test storage.", expected = "Ensure the environment is in KRaft mode."), + @Step(value = "Create necessary Kafka resources with additional volumes for secrets and config maps.", expected = "Resources are correctly instantiated with specified volumes."), + @Step(value = "Deploy Kafka, Kafka Connect, and Kafka Bridge with these volumes.", expected = "Components are correctly configured with additional volumes."), + @Step(value = "Verify that all pods (Kafka, Connect, and Bridge) have additional volumes mounted and accessible.", expected = "Volumes are correctly mounted and usable within pods.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testAdditionalVolumes() { assumeTrue(Environment.isKRaftModeEnabled()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java index 9eb4b9eaa13..be732bc9552 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java @@ -4,12 +4,18 @@ */ package io.strimzi.systemtest.kafka; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; import io.strimzi.api.kafka.model.user.KafkaUser; import io.strimzi.api.kafka.model.user.acl.AclOperation; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.TestConstants; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; @@ -30,6 +36,15 @@ import static io.strimzi.systemtest.TestTags.KAFKA_SMOKE; @Tag(KAFKA_SMOKE) +@SuiteDoc( + description = @Desc("Test checking basic functionality for each supported Kafka version. 
Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly."), + beforeTestSteps = { + @Step(value = "Deploy cluster operator with default installation", expected = "Cluster operator is deployed") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class KafkaVersionsST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(KafkaVersionsST.class); @@ -46,6 +61,19 @@ public class KafkaVersionsST extends AbstractST { */ @ParameterizedTest(name = "Kafka version: {0}.version()") @MethodSource("io.strimzi.systemtest.utils.TestKafkaVersion#getSupportedKafkaVersions") + @TestDoc( + description = @Desc("Test checking basic functionality for each supported Kafka version. Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly."), + steps = { + @Step(value = "Deploy Kafka cluster with specified version", expected = "Kafka cluster is deployed without any issue"), + @Step(value = "Verify the Topic Operator creation", expected = "Topic Operator is working correctly"), + @Step(value = "Verify the User Operator creation", expected = "User Operator is working correctly with SCRAM-SHA and ACLs"), + @Step(value = "Send and receive messages via PLAIN with SCRAM-SHA", expected = "Messages are sent and received successfully"), + @Step(value = "Send and receive messages via TLS", expected = "Messages are sent and received successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKafkaWithVersion(final TestKafkaVersion testKafkaVersion) { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java index 66ffa455099..6b9ee9b3d8c 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java @@ -4,6 +4,11 @@ */ package io.strimzi.systemtest.kafka; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; @@ -11,6 +16,7 @@ import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; @@ -37,12 +43,15 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assumptions.assumeFalse; -/** - * NOTE: STs in this class will not properly work on `minikube` clusters (and maybe not on other clusters that uses local - * storage), because the calculation of currently used storage is based - * on the local storage, which can be shared across multiple Docker containers. - * To properly run this suite, you should use cluster with proper storage. 
- */ +@SuiteDoc( + description = @Desc("NOTE: STs in this class will not properly work on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. To properly run this suite, you should use cluster with proper storage."), + beforeTestSteps = { + @Step(value = "Deploy default cluster operator with the required configurations", expected = "Cluster operator is deployed") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class QuotasST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(QuotasST.class); @@ -50,6 +59,20 @@ public class QuotasST extends AbstractST { * Test to check Kafka Quotas Plugin for disk space */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test to check Kafka Quotas Plugin for disk space."), + steps = { + @Step(value = "Assume the cluster is not Minikube or MicroShift", expected = "Cluster is appropriate for the test"), + @Step(value = "Create necessary resources for Kafka and nodes", expected = "Resources are created and Kafka is set up with quotas plugin"), + @Step(value = "Send messages without any user; observe quota enforcement", expected = "Producer stops after reaching the minimum available bytes"), + @Step(value = "Check Kafka logs for quota enforcement message", expected = "Kafka logs contain the expected quota enforcement message"), + @Step(value = "Send messages with excluded user and observe the behavior", expected = "Messages are sent successfully without hitting the quota"), + @Step(value = "Clean up resources", expected = "Resources are deleted successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKafkaQuotasPluginIntegration() { assumeFalse(cluster.isMinikube() || cluster.isMicroShift()); @@ -128,6 +151,20 @@ void testKafkaQuotasPluginIntegration() { @ParallelNamespaceTest @Tag(REGRESSION) + @TestDoc( + description = @Desc("Test verifying bandwidth limitations with Kafka quotas plugin."), + steps = { + @Step(value = "Create test storage and set excluded principal", expected = "Test storage is created and excluded principal is set"), + @Step(value = "Create Kafka resources including node pools and persistent Kafka with quotas enabled", expected = "Kafka resources are created successfully with quotas setup"), + @Step(value = "Create Kafka topic and user with SCRAM-SHA authentication", expected = "Kafka topic and SCRAM-SHA user are created successfully"), + @Step(value = "Send messages with normal user", expected = "Messages are sent and duration is measured"), + @Step(value = "Send messages with excluded user", expected = "Messages are sent and duration is measured"), + @Step(value = "Assert that time taken for normal user is greater than for excluded user", expected = "Assertion is successful") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testKafkaQuotasPluginWithBandwidthLimitation() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String excludedPrincipal = "User:" + testStorage.getUsername(); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java index f37e4d0c5a0..e2a3e8fe8d8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java +++ 
b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java @@ -5,12 +5,18 @@ package io.strimzi.systemtest.kafka; import com.fasterxml.jackson.core.JsonProcessingException; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.MicroShiftNotSupported; import io.strimzi.systemtest.annotations.ParallelTest; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import io.strimzi.systemtest.resources.NamespaceManager; @@ -56,6 +62,19 @@ @MicroShiftNotSupported("We are using Kaniko and OpenShift builds to build Kafka image with TS. To make it working on Microshift we will invest much time with not much additional value.") @Tag(REGRESSION) @Tag(TIERED_STORAGE) +@SuiteDoc( + description = @Desc("This test suite covers scenarios for Tiered Storage integration implemented within Strimzi."), + beforeTestSteps = { + @Step(value = "Create test namespace", expected = "Namespace is created"), + @Step(value = "Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build)", expected = "Kafka image is built"), + @Step(value = "Deploy Minio in test namespace and init the client inside the Minio pod", expected = "Minio is deployed and client is initialized"), + @Step(value = "Init bucket in Minio for purposes of these tests", expected = "Bucket is initialized in Minio"), + @Step(value = "Deploy Strimzi Cluster Operator", expected = "Strimzi Cluster Operator is deployed") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class TieredStorageST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(TieredStorageST.class); @@ -65,21 +84,20 @@ public class TieredStorageST extends AbstractST { private static final String BUILT_IMAGE_TAG = "latest"; private TestStorage suiteStorage; - /** - * @description This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. - * The tests use Aiven Tiered Storage plugin - ... - * - * @steps - * 1. - Deploys KafkaNodePool resource with Broker NodePool with PV of size 10Gi - * 2. - Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll - * 3. - Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed for speedup the sync) - * 4. - Starts continuous producer to send data to Kafka - * 5. - Wait until Minio size is not empty (contains data from Kafka) - * - * @usecase - * - tiered-storage-integration - */ @ParallelTest + @TestDoc( + description = @Desc("This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. 
The tests use Aiven Tiered Storage plugin - ..."), + steps = { + @Step(value = "Deploys KafkaNodePool resource with Broker NodePool with PV of size 10Gi", expected = "KafkaNodePool resource is deployed successfully with specified configuration"), + @Step(value = "Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll", expected = "Kafka resource is deployed successfully with Tiered Storage configuration"), + @Step(value = "Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed to speed up the sync)", expected = "Topic is created successfully with Tiered Storage enabled and segment size of 10mb"), + @Step(value = "Starts continuous producer to send data to Kafka", expected = "Continuous producer starts sending data to Kafka"), + @Step(value = "Wait until Minio size is not empty (contains data from Kafka)", expected = "Minio contains data from Kafka") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testTieredStorageWithAivenPlugin() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java index 8cf58af863d..de311b30192 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java @@ -5,6 +5,11 @@ package io.strimzi.systemtest.kafka.dynamicconfiguration; import io.fabric8.kubernetes.api.model.LabelSelector; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.KafkaClusterSpec; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; @@ -14,6 +19,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.cli.KafkaCmdClient; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; @@ -54,13 +60,14 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -/** - * DynamicConfST is responsible for verify that if we change dynamic Kafka configuration it will not - * trigger rolling update. 
- * Isolated -> for each test case we have different configuration of Kafka resource - */ @Tag(REGRESSION) @Tag(DYNAMIC_CONFIGURATION) +@SuiteDoc( + description = @Desc("DynamicConfST is responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update."), + beforeTestSteps = { + @Step(value = "Deploy the Cluster Operator", expected = "Cluster Operator is installed successfully") + } +) public class DynamicConfST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(DynamicConfST.class); @@ -69,6 +76,22 @@ public class DynamicConfST extends AbstractST { private Map kafkaConfig; @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Test for verifying dynamic configuration changes in a Kafka cluster with multiple clusters in one namespace."), + steps = { + @Step(value = "Deep copy shared Kafka configuration", expected = "Configuration map is duplicated with deep copy"), + @Step(value = "Create resources with wait", expected = "Resources are created and ready"), + @Step(value = "Create scraper pod", expected = "Scraper pod is created"), + @Step(value = "Retrieve and verify Kafka configurations from ConfigMaps", expected = "Configurations meet expected values"), + @Step(value = "Retrieve Kafka broker configuration via CLI", expected = "Dynamic configurations are retrieved"), + @Step(value = "Update Kafka configuration for unclean leader election", expected = "Configuration is updated and verified for dynamic property"), + @Step(value = "Verify updated Kafka configurations", expected = "Updated configurations are persistent and correct") + }, + labels = { + @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), + @Label(value = TestDocsLabels.KAFKA) + } + ) void testSimpleDynamicConfiguration() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -127,6 +150,19 @@ void testSimpleDynamicConfiguration() { @Tag(NODEPORT_SUPPORTED) @Tag(ROLLING_UPDATE) @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Ensures that updating to an external listener causes a rolling restart of the Kafka brokers."), + steps = { + @Step(value = "Create Kafka cluster with internal and external listeners.", expected = "Kafka cluster is created with the specified listeners."), + @Step(value = "Verify initial configurations are correctly set in the broker.", expected = "Initial broker configurations are verified."), + @Step(value = "Update Kafka cluster to change listener types.", expected = "Change in listener types triggers rolling update."), + @Step(value = "Verify the rolling restart is successful.", expected = "All broker nodes successfully rolled and Kafka configuration updated.") + }, + labels = { + @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), + @Label(value = TestDocsLabels.KAFKA) + } + ) void testUpdateToExternalListenerCausesRollingRestart() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -276,6 +312,24 @@ void testUpdateToExternalListenerCausesRollingRestart() { @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) @Tag(ROLLING_UPDATE) + @TestDoc( + description = @Desc("Test validating that updating Kafka cluster listeners to use external clients causes a rolling restart."), + steps = { + @Step(value = "Set up initial Kafka cluster and resources", expected = "Kafka cluster and resources are successfully created"), + @Step(value = "Create external Kafka clients and verify 
message production/consumption on plain listener", expected = "Messages are successfully produced and consumed using plain listener"), + @Step(value = "Attempt to produce/consume messages using TLS listener before update", expected = "Exception is thrown because the listener is plain"), + @Step(value = "Update Kafka cluster to use external TLS listener", expected = "Kafka cluster is updated and rolling restart occurs"), + @Step(value = "Verify message production/consumption using TLS listener after update", expected = "Messages are successfully produced and consumed using TLS listener"), + @Step(value = "Attempt to produce/consume messages using plain listener after TLS update", expected = "Exception is thrown because the listener is TLS"), + @Step(value = "Revert Kafka cluster listener to plain", expected = "Kafka cluster listener is reverted and rolling restart occurs"), + @Step(value = "Verify message production/consumption on plain listener after reverting", expected = "Messages are successfully produced and consumed using plain listener"), + @Step(value = "Attempt to produce/consume messages using TLS listener after reverting", expected = "Exception is thrown because the listener is plain") + }, + labels = { + @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), + @Label(value = TestDocsLabels.KAFKA) + } + ) void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java index 6d0ade5016e..95bdb970c7f 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java @@ -4,11 +4,16 @@ */ package io.strimzi.systemtest.kafka.dynamicconfiguration; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.kafka.config.model.ConfigModel; import io.strimzi.kafka.config.model.Type; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Environment; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.resources.NodePoolsConverter; import io.strimzi.systemtest.resources.ResourceManager; import io.strimzi.systemtest.resources.crd.StrimziPodSetResource; @@ -40,13 +45,20 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; -/** - * DynamicConfigurationSharedST is responsible for verify that if we change dynamic Kafka configuration it will not - * trigger rolling update - * Shared -> for each test case we same configuration of Kafka resource - */ @Tag(REGRESSION) @Tag(DYNAMIC_CONFIGURATION) +@SuiteDoc( + description = @Desc("DynamicConfigurationSharedST is responsible for verifying that changing dynamic Kafka configuration will not trigger a rolling update. 
Shared -> for each test case we use the same Kafka resource configuration."), + beforeTestSteps = { + @Step(value = "Run cluster operator installation", expected = "Cluster operator is installed"), + @Step(value = "Deploy shared Kafka across all test cases", expected = "Shared Kafka is deployed"), + @Step(value = "Deploy scraper pod", expected = "Scraper pod is deployed") + }, + labels = { + @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), + @Label(value = TestDocsLabels.KAFKA) + } +) public class DynamicConfSharedST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(DynamicConfSharedST.class); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index 923436b866e..9f160147ebb 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -7,6 +7,11 @@ import io.fabric8.kubernetes.api.model.Secret; import io.fabric8.kubernetes.api.model.SecretBuilder; import io.fabric8.kubernetes.api.model.Service; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.common.template.ContainerEnvVarBuilder; import io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaResources; @@ -25,6 +30,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.OpenShiftOnly; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; @@ -91,6 +97,15 @@ import static org.junit.jupiter.api.Assertions.assertEquals; @Tag(REGRESSION) +@SuiteDoc( + description = @Desc("This class demonstrates various tests for Kafka listeners using different authentication mechanisms."), + beforeTestSteps = { + @Step(value = "Install the cluster operator with default settings", expected = "Cluster operator is installed successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class ListenersST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(ListenersST.class); @@ -123,6 +138,19 @@ public class ListenersST extends AbstractST { * Test sending messages over plain transport, without auth */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test sending messages over plain transport, without auth"), + steps = { + @Step(value = "Create test storage instance", expected = "Instance of TestStorage is created"), + @Step(value = "Create Kafka resources with wait", expected = "Kafka broker, controller, and topic are created"), + @Step(value = "Log transmission message", expected = "Transmission message is logged"), + @Step(value = "Produce and consume messages with plain clients", expected = "Messages are successfully produced and consumed"), + @Step(value = "Validate Kafka service discovery annotation", expected = "The discovery annotation is validated successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testSendMessagesPlainAnonymous() { final TestStorage testStorage = new 
TestStorage(ResourceManager.getTestContext()); @@ -150,6 +178,21 @@ void testSendMessagesPlainAnonymous() { * Test sending messages over tls transport using mutual tls auth */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test sending messages over tls transport using mutual tls auth."), + steps = { + @Step(value = "Create TestStorage instance", expected = "TestStorage object is created"), + @Step(value = "Create Kafka node pool resources", expected = "Persistent storage node pools are created"), + @Step(value = "Disable plain listener and enable tls listener in Kafka resource", expected = "Kafka with plain listener disabled and tls listener enabled is created"), + @Step(value = "Create Kafka topic and user", expected = "Kafka topic and tls user are created"), + @Step(value = "Configure and deploy Kafka clients", expected = "Kafka clients producer and consumer with tls are deployed"), + @Step(value = "Wait for clients to successfully send and receive messages", expected = "Clients successfully send and receive messages over tls"), + @Step(value = "Assert that the service discovery contains expected info", expected = "Service discovery matches expected info") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testSendMessagesTlsAuthenticated() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -214,6 +257,21 @@ void testSendMessagesTlsAuthenticated() { * Test sending messages over plain transport using scram sha auth */ @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test sending messages over plain transport using scram sha auth."), + steps = { + @Step(value = "Initialize test storage and resources", expected = "Test storage and resources are initialized"), + @Step(value = "Create Kafka brokers and controllers", expected = "Kafka brokers and controllers are created"), + @Step(value = "Enable Kafka with plain listener disabled and scram sha auth", expected = "Kafka instance with scram sha auth is enabled on a specified listener"), + @Step(value = "Set up topic and user", expected = "Kafka topic and Kafka user are set up with scram sha auth credentials"), + @Step(value = "Check logs in broker pod for authentication", expected = "Logs show that scram sha authentication succeeded"), + @Step(value = "Send messages over plain transport using scram sha authentication", expected = "Messages are successfully sent over plain transport using scram sha auth"), + @Step(value = "Verify service discovery annotation", expected = "Service discovery annotation is checked and validated") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testSendMessagesPlainScramSha() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -277,6 +335,21 @@ void testSendMessagesPlainScramSha() { */ @ParallelNamespaceTest @Tag(ACCEPTANCE) + @TestDoc( + description = @Desc("Test sending messages over TLS transport using SCRAM-SHA authentication."), + steps = { + @Step(value = "Initialize test storage with test context", expected = "Test storage is initialized"), + @Step(value = "Create resources for Kafka node pools", expected = "Kafka node pools are created"), + @Step(value = "Create Kafka cluster with SCRAM-SHA-512 authentication", expected = "Kafka cluster is created with SCRAM-SHA authentication"), + @Step(value = "Create Kafka topic and user", expected = "Kafka topic and user are created"), + @Step(value = "Transmit messages over TLS using SCRAM-SHA", expected = "Messages are successfully 
transmitted"), + @Step(value = "Check if generated password has the expected length", expected = "Password length is as expected"), + @Step(value = "Verify Kafka service discovery annotation", expected = "Service discovery annotation is as expected") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testSendMessagesTlsScramSha() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final int passwordLength = 50; @@ -345,6 +418,19 @@ void testSendMessagesTlsScramSha() { */ @ParallelNamespaceTest @Tag(ACCEPTANCE) + @TestDoc( + description = @Desc("Test custom listener configured with scram SHA authentication and TLS."), + steps = { + @Step(value = "Initialize test storage and resource manager", expected = "Resources are initialized successfully"), + @Step(value = "Create a Kafka cluster with broker and controller node pools", expected = "Kafka cluster is created with node pools"), + @Step(value = "Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication", expected = "Kafka cluster with custom listener is ready"), + @Step(value = "Create a Kafka topic and SCRAM-SHA user", expected = "Kafka topic and user are created"), + @Step(value = "Transmit messages over TLS using SCRAM-SHA authentication", expected = "Messages are transmitted successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testSendMessagesCustomListenerTlsScramSha() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -393,6 +479,20 @@ void testSendMessagesCustomListenerTlsScramSha() { @ParallelNamespaceTest @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test checking the functionality of Kafka cluster with NodePort external listener configurations."), + steps = { + @Step(value = "Initialize TestStorage object and set labels and annotations", expected = "TestStorage object is initialized and labels and annotations are set"), + @Step(value = "Create resource with Kafka broker pool and controller pool", expected = "Resources with Kafka pools are created successfully"), + @Step(value = "Create Kafka cluster with NodePort and TLS listeners", expected = "Kafka cluster is set up with the specified listeners"), + @Step(value = "Create ExternalKafkaClient and verify message production and consumption", expected = "Messages are produced and consumed successfully"), + @Step(value = "Check Kafka status for proper listener addresses", expected = "Listener addresses in Kafka status are validated successfully"), + @Step(value = "Check ClusterRoleBinding annotations and labels in Kafka cluster", expected = "Annotations and labels match the expected values") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testNodePort() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final Map label = Collections.singletonMap("my-label", "value"); @@ -495,6 +595,20 @@ void testNodePort() { @ParallelNamespaceTest @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test verifying that NodePort configuration can be overridden for Kafka brokers and bootstrap service."), + steps = { + @Step(value = "Initialize TestStorage instance and define broker and bootstrap NodePort values.", expected = "TestStorage instance is created and NodePort values are initialized."), + @Step(value = "Create Kafka broker and controller pools using resource manager.", expected = "Kafka broker and controller pools are 
created successfully."), + @Step(value = "Deploy Kafka cluster with overridden NodePort configuration for brokers and bootstrap.", expected = "Kafka cluster is deployed with specified NodePort values."), + @Step(value = "Verify that the bootstrap service NodePort matches the configured value.", expected = "Bootstrap NodePort matches the configured value of 32100."), + @Step(value = "Verify that the broker service NodePort matches the configured value.", expected = "Broker NodePort matches the configured value of 32000."), + @Step(value = "Produce and consume messages using an external Kafka client.", expected = "Messages are produced and consumed successfully using the external client.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testOverrideNodePortConfiguration() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -562,6 +676,20 @@ void testOverrideNodePortConfiguration() { @ParallelNamespaceTest @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test the NodePort TLS functionality for Kafka brokers in a Kubernetes environment."), + steps = { + @Step(value = "Initialize test storage with the test context", expected = "Test storage is initialized with the test context"), + @Step(value = "Create Kafka broker and controller node pools", expected = "Broker and controller node pools are created"), + @Step(value = "Deploy Kafka cluster with NodePort listener and TLS enabled", expected = "Kafka cluster is deployed with NodePort listener and TLS"), + @Step(value = "Create a Kafka topic", expected = "Kafka topic is created"), + @Step(value = "Create a Kafka user with TLS authentication", expected = "Kafka user with TLS authentication is created"), + @Step(value = "Configure external Kafka client and send and receive messages using TLS", expected = "External Kafka client sends and receives messages using TLS successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testNodePortTls() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -608,6 +736,19 @@ void testNodePortTls() { @ParallelNamespaceTest @Tag(LOADBALANCER_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test verifying load balancer functionality with external clients."), + steps = { + @Step(value = "Create instances for broker pool and controller pool using NodePoolsConverter and KafkaNodePoolTemplates", expected = "Resources are created and ready for use"), + @Step(value = "Create Kafka cluster with ephemeral storage and load balancer listener", expected = "Kafka cluster is created with the specified configuration"), + @Step(value = "Wait until the load balancer address is reachable", expected = "Address is reachable"), + @Step(value = "Configure external Kafka client and send messages", expected = "Messages are sent successfully"), + @Step(value = "Verify that messages are correctly produced and consumed", expected = "Messages are produced and consumed as expected") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testLoadBalancer() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -655,6 +796,19 @@ void testLoadBalancer() { @Tag(ACCEPTANCE) @Tag(LOADBALANCER_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test validating the TLS connection through a Kafka LoadBalancer."), + steps = { + @Step(value = "Create and configure Kafka node pools", expected = "Node pools for 
brokers and controllers are created"), + @Step(value = "Create and configure Kafka cluster with TLS listener", expected = "Kafka cluster with TLS enabled LoadBalancer listener is created"), + @Step(value = "Create and configure Kafka user with TLS authentication", expected = "Kafka user with TLS authentication is created"), + @Step(value = "Wait for the LoadBalancer address to be reachable", expected = "LoadBalancer address becomes reachable"), + @Step(value = "Send and receive messages using external Kafka client", expected = "Messages are successfully produced and consumed over the TLS connection") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testLoadBalancerTls() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -703,6 +857,20 @@ void testLoadBalancerTls() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test verifies the functionality of Kafka with a cluster IP listener."), + steps = { + @Step(value = "Initialize the test storage and resource manager", expected = "Test storage and resource manager are initialized"), + @Step(value = "Create the Kafka broker and controller pools", expected = "Kafka broker and controller pools are created"), + @Step(value = "Create the Kafka cluster with a cluster IP listener", expected = "Kafka cluster with cluster IP listener is created"), + @Step(value = "Retrieve the cluster IP bootstrap address", expected = "Cluster IP bootstrap address is correctly retrieved"), + @Step(value = "Deploy Kafka clients", expected = "Kafka clients are deployed successfully"), + @Step(value = "Wait for Kafka clients to succeed", expected = "Kafka clients successfully produce and consume messages") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testClusterIp() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -733,6 +901,20 @@ void testClusterIp() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("This test validates the creation of Kafka resources with TLS authentication, ensuring proper setup and functionality of the Kafka cluster in a parallel namespace."), + steps = { + @Step(value = "Create TestStorage instance and initialize resource manager with broker and controller node pools", expected = "Broker and controller node pools are set up successfully"), + @Step(value = "Create ephemeral Kafka cluster with TLS enabled on ClusterIP listener", expected = "Kafka cluster is created with TLS enabled listener on port 9103"), + @Step(value = "Create Kafka user with TLS authentication", expected = "Kafka user is created successfully"), + @Step(value = "Retrieve the ClusterIP bootstrap address for the Kafka cluster", expected = "Bootstrap address for the Kafka cluster is retrieved"), + @Step(value = "Instantiate TLS Kafka Clients (producer and consumer)", expected = "TLS Kafka clients are instantiated successfully"), + @Step(value = "Wait for the Kafka Clients to complete their tasks and verify success", expected = "Kafka Clients complete their tasks successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testClusterIpTls() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -775,6 +957,23 @@ void testClusterIpTls() { @ParallelNamespaceTest @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test custom certificates in Kafka listeners, specifically for the NodePort type."), + steps = { + @Step(value = "Generate Root CA certificate and 
key", expected = "Root CA certificate and key are generated"), + @Step(value = "Generate Intermediate CA certificate and key using Root CA", expected = "Intermediate CA certificate and key are generated"), + @Step(value = "Generate Kafka Broker certificate and key using Intermediate CA", expected = "Broker certificate and key are generated"), + @Step(value = "Export generated certificates and keys to PEM files", expected = "PEM files are created with certificates and keys"), + @Step(value = "Create custom secret with the PEM files", expected = "Custom secret is created within the required namespace"), + @Step(value = "Deploy and wait for Kafka cluster resources with custom certificates", expected = "Kafka cluster is deployed successfully with custom certificates"), + @Step(value = "Create and wait for TLS KafkaUser", expected = "TLS KafkaUser is created successfully"), + @Step(value = "Produce and consume messages using ExternalKafkaClient", expected = "Messages are successfully produced and consumed"), + @Step(value = "Produce and consume messages using internal TLS client", expected = "Messages are successfully produced and consumed with internal TLS client") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomSoloCertificatesForNodePort() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1; @@ -863,6 +1062,24 @@ void testCustomSoloCertificatesForNodePort() { @ParallelNamespaceTest @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test verifies the custom chain certificates configuration for Kafka NodePort listener."), + steps = { + @Step(value = "Initialize test storage.", expected = "Test storage is initialized."), + @Step(value = "Generate custom root CA and intermediate certificates.", expected = "Root and intermediate certificates are generated."), + @Step(value = "Generate end entity certificates using intermediate CA.", expected = "End entity certificates are generated."), + @Step(value = "Export certificates to PEM files.", expected = "Certificates are exported to PEM files."), + @Step(value = "Create Kubernetes secrets with the custom certificates.", expected = "Custom certificate secrets are created."), + @Step(value = "Deploy Kafka cluster with NodePort listener using the custom certificates.", expected = "Kafka cluster is deployed successfully."), + @Step(value = "Create a Kafka user with TLS authentication.", expected = "Kafka user is created."), + @Step(value = "Verify message production and consumption with external Kafka client.", expected = "Messages are produced and consumed successfully."), + @Step(value = "Verify message production with internal Kafka client.", expected = "Messages are produced successfully."), + @Step(value = "Verify message consumption with internal Kafka client.", expected = "Messages are consumed successfully.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomChainCertificatesForNodePort() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertChain1 = testStorage.getClusterName() + "-" + customCertChain1; @@ -955,6 +1172,22 @@ void testCustomChainCertificatesForNodePort() { @ParallelNamespaceTest @Tag(LOADBALANCER_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test verifying custom solo certificates for load balancer in a Kafka cluster."), 
+ steps = { + @Step(value = "Initialize test storage", expected = "Test storage is initialized with the current test context"), + @Step(value = "Create custom secret", expected = "Custom secret is created with the specified certificate and key"), + @Step(value = "Create Kafka resources with node pools", expected = "Kafka brokers and controller pools are created and configured"), + @Step(value = "Create Kafka cluster with listeners", expected = "Kafka cluster is created with internal and load balancer listeners using the custom certificates"), + @Step(value = "Create TLS user", expected = "TLS user is created"), + @Step(value = "Verify produced and consumed messages via external client", expected = "Messages are successfully produced and consumed using the custom certificates"), + @Step(value = "Create and verify TLS producer client", expected = "TLS producer client is created and verified for success"), + @Step(value = "Create and verify TLS consumer client", expected = "TLS consumer client is created and verified for success") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomSoloCertificatesForLoadBalancer() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1; @@ -1039,6 +1272,22 @@ void testCustomSoloCertificatesForLoadBalancer() { @ParallelNamespaceTest @Tag(LOADBALANCER_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Verifies custom certificate chain configuration for Kafka load balancer, ensuring proper secret creation, resource setup, and message sending/receiving functionality."), + steps = { + @Step(value = "Create custom secrets for certificate chains and root CA", expected = "Secrets are created successfully"), + @Step(value = "Deploy Kafka broker and controller pools with custom certificates", expected = "Kafka pools are deployed without issues"), + @Step(value = "Deploy Kafka cluster with custom listener configurations", expected = "Kafka cluster is deployed with custom listener configurations"), + @Step(value = "Set up Kafka topic and user", expected = "Kafka topic and user are created successfully"), + @Step(value = "Verify message production and consumption via external Kafka client with TLS", expected = "Messages are produced and consumed successfully"), + @Step(value = "Set up Kafka clients for further messaging operations", expected = "Kafka clients are set up without issues"), + @Step(value = "Produce messages using Kafka producer", expected = "Messages are produced successfully"), + @Step(value = "Consume messages using Kafka consumer", expected = "Messages are consumed successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomChainCertificatesForLoadBalancer() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertChain1 = testStorage.getClusterName() + "-" + customCertChain1; @@ -1130,6 +1379,24 @@ void testCustomChainCertificatesForLoadBalancer() { @Tag(ROUTE) @Tag(EXTERNAL_CLIENTS_USED) @OpenShiftOnly + @TestDoc( + description = @Desc("Test custom solo certificates for Kafka route and client communication."), + steps = { + @Step(value = "Generate root CA certificate and key", expected = "Root CA certificate and key are generated"), + @Step(value = "Generate intermediate CA certificate and key", expected = "Intermediate CA certificate and key are generated"), + @Step(value = "Generate 
end-entity certificate and key for Strimzi", expected = "End-entity certificate and key for Strimzi are generated"), + @Step(value = "Export certificates and keys to PEM files", expected = "Certificates and keys are exported to PEM files"), + @Step(value = "Create custom secret with certificates and keys", expected = "Custom secret is created in the namespace with certificates and keys"), + @Step(value = "Deploy Kafka cluster with custom certificates", expected = "Kafka cluster is deployed with custom certificates"), + @Step(value = "Create TLS Kafka user", expected = "TLS Kafka user is created"), + @Step(value = "Verify client communication using external Kafka client", expected = "Messages are successfully produced and consumed using external Kafka client"), + @Step(value = "Deploy Kafka clients with custom certificates", expected = "Kafka clients are deployed with custom certificates"), + @Step(value = "Verify client communication using internal Kafka client", expected = "Messages are successfully produced and consumed using internal Kafka client") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomSoloCertificatesForRoute() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1; @@ -1219,6 +1486,23 @@ void testCustomSoloCertificatesForRoute() { @Tag(EXTERNAL_CLIENTS_USED) @Tag(ROUTE) @OpenShiftOnly + @TestDoc( + description = @Desc("Test to verify custom chain certificates for a Kafka Route."), + steps = { + @Step(value = "Initialize TestStorage", expected = "TestStorage instance is created and associated resources are initialized"), + @Step(value = "Generate root and intermediate certificates", expected = "Root and intermediate CA keys are generated"), + @Step(value = "Create cluster custom certificate chain and root CA secrets", expected = "Custom certificate chain and root CA secrets are created in OpenShift"), + @Step(value = "Create Kafka cluster with custom certificates", expected = "Kafka cluster is deployed with custom certificates for internal and external listeners"), + @Step(value = "Create Kafka user", expected = "Kafka user with TLS authentication is created"), + @Step(value = "Verify message production and consumption with external Kafka client", expected = "Messages are produced and consumed successfully using the external Kafka client"), + @Step(value = "Create Kafka clients for internal message production and consumption", expected = "Internal Kafka clients are created and configured with TLS authentication"), + @Step(value = "Verify internal message production with Kafka client", expected = "Messages are produced successfully using the internal Kafka client"), + @Step(value = "Verify internal message consumption with Kafka client", expected = "Messages are consumed successfully using the internal Kafka client") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomChainCertificatesForRoute() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -1308,11 +1592,28 @@ void testCustomChainCertificatesForRoute() { ClientUtils.waitForInstantConsumerClientSuccess(testStorage); } - @ParallelNamespaceTest @Tag(LOADBALANCER_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) @SuppressWarnings({"checkstyle:MethodLength"}) + @TestDoc( + description = @Desc("This test verifies the behavior of Kafka with custom certificates for load balancer and TLS rolling updates."), + steps = { + 
@Step(value = "Initialize test storage", expected = "Test storage initialized with context"), + @Step(value = "Create custom secrets for Kafka clusters", expected = "Secrets created and available in namespace"), + @Step(value = "Deploy Kafka resources with load balancer and internal TLS listener", expected = "Kafka resources deployed with respective configurations"), + @Step(value = "Create Kafka user and retrieve certificates", expected = "Kafka user created and certificates retrieved from Kafka status and secrets"), + @Step(value = "Compare Kafka certificates with secret certificates", expected = "Certificates from Kafka status and secrets match"), + @Step(value = "Verify message production and consumption using an external Kafka client", expected = "Messages successfully produced and consumed over SSL"), + @Step(value = "Trigger and verify TLS rolling update", expected = "TLS rolling update completed successfully"), + @Step(value = "Repeat certificate verification steps after rolling update", expected = "Certificates from Kafka status and secrets match post update"), + @Step(value = "Repeatedly produce and consume messages to ensure Kafka stability", expected = "Messages successfully produced and consumed, ensuring stability"), + @Step(value = "Revert the certificate updates and verify Kafka status", expected = "Certificates reverted and verified, Kafka operates normally") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomCertLoadBalancerAndTlsRollingUpdate() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1; @@ -1560,6 +1861,23 @@ void testCustomCertLoadBalancerAndTlsRollingUpdate() { @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) @SuppressWarnings({"checkstyle:MethodLength"}) + @TestDoc( + description = @Desc("Test verifies custom certificates with NodePort and rolling update in Kafka."), + steps = { + @Step(value = "Generate root and intermediate certificates", expected = "Certificates are generated successfully"), + @Step(value = "Generate end-entity certificates", expected = "End-entity certificates are generated successfully"), + @Step(value = "Create custom secrets with generated certificates", expected = "Secrets are created in Kubernetes"), + @Step(value = "Deploy Kafka cluster with custom NodePort and TLS settings", expected = "Kafka cluster is deployed and running"), + @Step(value = "Verify messages sent and received through external Kafka client", expected = "Messages are produced and consumed successfully"), + @Step(value = "Perform rolling update and update certificates in custom secrets", expected = "Rolling update is performed and certificates are updated"), + @Step(value = "Verify messages sent and received after rolling update", expected = "Messages are produced and consumed successfully after update"), + @Step(value = "Restore default certificate configuration and perform rolling update", expected = "Default certificates are restored and rolling update is completed"), + @Step(value = "Verify messages sent and received with restored configuration", expected = "Messages are produced and consumed successfully with restored configuration") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomCertNodePortAndTlsRollingUpdate() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" 
+ customCertServer1; @@ -1813,6 +2131,22 @@ void testCustomCertNodePortAndTlsRollingUpdate() { @Tag(ROUTE) @OpenShiftOnly @SuppressWarnings({"checkstyle:MethodLength"}) + @TestDoc( + description = @Desc("This test verifies the custom certificate handling and TLS rolling update mechanisms for Kafka brokers using OpenShift-specific configurations."), + steps = { + @Step(value = "Create various certificate chains and export them to PEM files", expected = "Certificates are created and exported successfully"), + @Step(value = "Create custom secrets with the generated certificates", expected = "Secrets are created in the specified namespace"), + @Step(value = "Deploy Kafka cluster and TLS user with specified configurations", expected = "Kafka cluster and TLS user are deployed successfully"), + @Step(value = "Verify certificates in KafkaStatus match those in the secrets", expected = "Certificates are verified to match"), + @Step(value = "Use external Kafka client to produce and consume messages", expected = "Messages are produced and consumed successfully"), + @Step(value = "Update Kafka listeners with new certificates and perform rolling update", expected = "Kafka cluster rolls out successfully with updated certificates"), + @Step(value = "Verify certificates in KafkaStatus match after update", expected = "Certificates are verified to match after the update"), + @Step(value = "Repeat message production and consumption with updated certificates", expected = "Messages are produced and consumed successfully with new certificates") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCustomCertRouteAndTlsRollingUpdate() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String clusterCustomCertServer1 = testStorage.getClusterName() + "-" + customCertServer1; @@ -2081,6 +2415,19 @@ void testCustomCertRouteAndTlsRollingUpdate() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test for verifying non-existing custom certificate handling by creating necessary resources and ensuring correct error message check."), + steps = { + @Step(value = "Initialize TestStorage object", expected = "TestStorage instance is created"), + @Step(value = "Create necessary Kafka node pools", expected = "Kafka node pools are created and initialized"), + @Step(value = "Create Kafka cluster with a listener using non-existing certificate", expected = "Kafka cluster resource is initialized with non-existing TLS certificate"), + @Step(value = "Wait for pods to be ready if not in KRaft mode", expected = "Pods are ready"), + @Step(value = "Wait for Kafka status condition message indicating the non-existing secret", expected = "Correct error message regarding the non-existing secret appears") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testNonExistingCustomCertificate() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String nonExistingCertName = "non-existing-certificate"; @@ -2119,6 +2466,21 @@ void testNonExistingCustomCertificate() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test checking behavior when Kafka is configured with a non-existing certificate in the TLS listener."), + steps = { + @Step(value = "Initialize TestStorage object with test context.", expected = "TestStorage object is initialized."), + @Step(value = "Define non-existing certificate name.", expected = "Non-existing certificate name is defined."), + @Step(value = "Create a custom secret for Kafka with 
the defined certificate.", expected = "Custom secret created successfully."), + @Step(value = "Create Kafka node pools resources.", expected = "Kafka node pools resources created."), + @Step(value = "Create Kafka cluster with ephemeral storage and the non-existing certificate.", expected = "Kafka cluster creation initiated."), + @Step(value = "Wait for controller pods to be ready if in non-KRaft mode.", expected = "Controller pods are ready."), + @Step(value = "Wait until Kafka status message indicates missing certificate.", expected = "Error message about missing certificate is found in Kafka status condition.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCertificateWithNonExistingDataCrt() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String nonExistingCertName = "non-existing-crt"; @@ -2161,6 +2523,21 @@ void testCertificateWithNonExistingDataCrt() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Test verifies that a Kafka cluster correctly identifies and reports the absence of a specified custom certificate private key."), + steps = { + @Step(value = "Initialize test storage.", expected = "Test storage is initialized with the current test context."), + @Step(value = "Define the non-existing certificate key.", expected = "The non-existing certificate key string is defined."), + @Step(value = "Create a custom secret with a certificate for Kafka server.", expected = "Custom secret is created in the namespace."), + @Step(value = "Create broker and controller resources with node pools.", expected = "Resources are created and ready."), + @Step(value = "Deploy a Kafka cluster with a listener using the custom secret and non-existing key.", expected = "Deployment initiated without waiting for the resources to be ready."), + @Step(value = "If not in KRaft mode, wait for controller pods to be ready.", expected = "Controller pods are in ready state (if applicable)."), + @Step(value = "Check Kafka status condition for custom certificate error message.", expected = "Error message indicating the missing custom certificate private key is present in Kafka status conditions.") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCertificateWithNonExistingDataKey() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); final String nonExistingCertKey = "non-existing-key"; @@ -2203,6 +2580,25 @@ void testCertificateWithNonExistingDataKey() { } @ParallelNamespaceTest + @TestDoc( + description = @Desc("Validates that messages can be sent and received over TLS with SCRAM-SHA authentication using a predefined password, and that the password can be updated and still be functional."), + steps = { + @Step(value = "Create a test storage instance", expected = "Test storage is created"), + @Step(value = "Create and encode the initial password", expected = "Initial password is encoded"), + @Step(value = "Create and encode the secondary password", expected = "Secondary password is encoded"), + @Step(value = "Create a secret in Kubernetes with the initial password", expected = "Secret is created and contains the initial password"), + @Step(value = "Verify the password in the secret", expected = "Password in the secret is verified to be correct"), + @Step(value = "Create a KafkaUser with SCRAM-SHA authentication using the secret", expected = "KafkaUser is created with correct authentication settings"), + @Step(value = "Create Kafka cluster and topic with SCRAM-SHA authentication", 
expected = "Kafka cluster and topic are created correctly"), + @Step(value = "Produce and consume messages using TLS and SCRAM-SHA", expected = "Messages are successfully transmitted and received"), + @Step(value = "Update the secret with the secondary password", expected = "Secret is updated with the new password"), + @Step(value = "Wait for the user password change to take effect", expected = "Password change is detected and applied"), + @Step(value = "Produce and consume messages with the updated password", expected = "Messages are successfully transmitted and received with the new password") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMessagesTlsScramShaWithPredefinedPassword() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); @@ -2293,6 +2689,20 @@ void testMessagesTlsScramShaWithPredefinedPassword() { @Tag(NODEPORT_SUPPORTED) @ParallelNamespaceTest + @TestDoc( + description = @Desc("Verify that advertised hostnames appear correctly in broker certificates."), + steps = { + @Step(value = "Initialize TestStorage object", expected = "TestStorage object is initialized"), + @Step(value = "Define internal and external advertised hostnames and ports", expected = "Hostnames and ports are defined and listed"), + @Step(value = "Create broker configurations with advertised hostnames and ports", expected = "Broker configurations are created"), + @Step(value = "Deploy resources with Wait function and create Kafka instance", expected = "Resources and Kafka instance are successfully created"), + @Step(value = "Retrieve broker certificates from Kubernetes secrets", expected = "Certificates are retrieved correctly from secrets"), + @Step(value = "Validate that each broker's certificate contains the expected internal and external advertised hostnames", expected = "Certificates contain the correct advertised hostnames") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testAdvertisedHostNamesAppearsInBrokerCerts() throws CertificateException { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java index 135520ca706..415b030c514 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java @@ -4,6 +4,11 @@ */ package io.strimzi.systemtest.kafka.listeners; +import io.skodjob.annotations.Desc; +import io.skodjob.annotations.Label; +import io.skodjob.annotations.Step; +import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; @@ -14,6 +19,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.OpenShiftOnly; +import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; @@ -48,6 +54,15 @@ import static org.junit.jupiter.api.Assumptions.assumeFalse; 
@Tag(REGRESSION) +@SuiteDoc( + description = @Desc("This test suite verifies Kafka clusters configured with multiple listeners of various types (internal, NodePort, LoadBalancer, and Route), including combinations of them, within the same namespace."), + beforeTestSteps = { + @Step(value = "Initialize TestStorage with current test context", expected = "TestStorage object is initialized successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } +) public class MultipleListenersST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(MultipleListenersST.class); @@ -59,12 +74,32 @@ public class MultipleListenersST extends AbstractST { @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Test verifying the functionality of using multiple NodePorts in a Kafka cluster within the same namespace."), + steps = { + @Step(value = "Initialize TestStorage with current test context", expected = "TestStorage object is initialized successfully"), + @Step(value = "Execute listener tests with NodePort configuration", expected = "Listener tests run without issues using NodePort") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMultipleNodePorts() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); runListenersTest(testCases.get(KafkaListenerType.NODEPORT), testStorage.getClusterName()); } @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Test verifying the functionality of using multiple internal listeners in a Kafka cluster within the same namespace."), + steps = { + @Step(value = "Initialize TestStorage with the test context", expected = "TestStorage instance is created"), + @Step(value = "Run the internal Kafka listeners test", expected = "Listeners test runs successfully on the specified cluster") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMultipleInternal() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); runListenersTest(testCases.get(KafkaListenerType.INTERNAL), testStorage.getClusterName()); @@ -74,6 +109,18 @@ void testMultipleInternal() { @Tag(ACCEPTANCE) @Tag(EXTERNAL_CLIENTS_USED) @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Test verifying the combination of internal and external Kafka listeners."), + steps = { + @Step(value = "Check if the environment supports cluster-wide NodePort rights", expected = "Test is skipped if the environment is not suitable"), + @Step(value = "Initialize test storage with context", expected = "Test storage is successfully initialized"), + @Step(value = "Retrieve and combine internal and NodePort listeners", expected = "Listeners are successfully retrieved and combined"), + @Step(value = "Run listeners test with combined listeners", expected = "Listeners test is executed successfully") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCombinationOfInternalAndExternalListeners() { // Nodeport needs cluster wide rights to work properly which is not possible with STRIMZI_RBAC_SCOPE=NAMESPACE assumeFalse(Environment.isNamespaceRbacScope()); @@ -94,6 +141,17 @@ void testCombinationOfInternalAndExternalListeners() { @Tag(LOADBALANCER_SUPPORTED) @IsolatedTest("Using more tha one Kafka cluster in one namespace") @Tag(EXTERNAL_CLIENTS_USED) + @TestDoc( + description = @Desc("Test verifying the behavior of multiple load balancers in a single namespace using more than one Kafka cluster."), + steps = { + 
@Step(value = "Initialize TestStorage instance", expected = "TestStorage instance is created with proper context"), + @Step(value = "Run listeners test with LOADBALANCER type", expected = "Listeners test executes successfully with load balancers"), + @Step(value = "Validate the results", expected = "Results match the expected outcomes for multiple load balancers") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMultipleLoadBalancers() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); runListenersTest(testCases.get(KafkaListenerType.LOADBALANCER), testStorage.getClusterName()); @@ -103,6 +161,17 @@ void testMultipleLoadBalancers() { @Tag(EXTERNAL_CLIENTS_USED) @Tag(ROUTE) @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Test to verify the functionality of multiple Kafka route listeners in a single namespace."), + steps = { + @Step(value = "Initialize the TestStorage object with the current test context", expected = "TestStorage object is created successfully"), + @Step(value = "Retrieve test cases for Kafka Listener Type ROUTE", expected = "Test cases for ROUTE are retrieved"), + @Step(value = "Run listener tests using the retrieved test cases and cluster name", expected = "Listener tests run successfully with no errors") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMultipleRoutes() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); runListenersTest(testCases.get(KafkaListenerType.ROUTE), testStorage.getClusterName()); @@ -112,6 +181,19 @@ void testMultipleRoutes() { @Tag(NODEPORT_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Test ensuring that different types of external Kafka listeners (ROUTE and NODEPORT) work correctly when mixed."), + steps = { + @Step(value = "Initialize TestStorage", expected = "TestStorage is initialized with TestContext"), + @Step(value = "Retrieve route listeners", expected = "Route listeners are retrieved from test cases"), + @Step(value = "Retrieve nodeport listeners", expected = "Nodeport listeners are retrieved from test cases"), + @Step(value = "Combine route and nodeport listeners", expected = "Multiple different listeners list is populated"), + @Step(value = "Run listeners test", expected = "Listeners test runs using the combined list") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testMixtureOfExternalListeners() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List multipleDifferentListeners = new ArrayList<>(); @@ -131,6 +213,18 @@ void testMixtureOfExternalListeners() { @Tag(LOADBALANCER_SUPPORTED) @Tag(EXTERNAL_CLIENTS_USED) @IsolatedTest("Using more tha one Kafka cluster in one namespace") + @TestDoc( + description = @Desc("Verifies the combination of every kind of Kafka listener: INTERNAL, NODEPORT, ROUTE, and LOADBALANCER."), + steps = { + @Step(value = "Create test storage instance", expected = "Test storage instance created with current test context"), + @Step(value = "Retrieve different types of Kafka listeners", expected = "Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved"), + @Step(value = "Combine all different listener lists", expected = "A combined list of all Kafka listener types is created"), + @Step(value = "Run listeners test with combined listener list", expected = "Listeners test 
runs with all types of Kafka listeners in the combined list") + }, + labels = { + @Label(value = TestDocsLabels.KAFKA) + } + ) void testCombinationOfEveryKindOfListener() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); From 8621457d6e5905e83eb62a4305b2dbdeecc0bf87 Mon Sep 17 00:00:00 2001 From: see-quick Date: Fri, 27 Sep 2024 22:54:32 +0200 Subject: [PATCH 02/12] polishing Signed-off-by: see-quick --- .../systemtest/kafka/ConfigProviderST.java | 3 +-- .../systemtest/kafka/KafkaNodePoolST.java | 17 ----------------- .../systemtest/kafka/KafkaVersionsST.java | 10 ---------- .../io/strimzi/systemtest/kafka/QuotasST.java | 5 +---- .../systemtest/kafka/TieredStorageST.java | 13 ------------- .../kafka/listeners/ListenersST.java | 19 ------------------- .../kafka/listeners/MultipleListenersST.java | 10 ---------- 7 files changed, 2 insertions(+), 75 deletions(-) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index 03589a664ee..109c7590557 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -47,7 +47,7 @@ @SuiteDoc( description = @Desc("Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration."), beforeTestSteps = { - @Step(value = "Deploy uber operator across all namespaces, with custom configuration", expected = "Uber operator is deployed") + @Step(value = "Deploy cluster operator across all namespaces, with custom configuration", expected = "Cluster operator is deployed") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -61,7 +61,6 @@ public class ConfigProviderST extends AbstractST { @TestDoc( description = @Desc("Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration."), steps = { - @Step(value = "Initialize test storage and define custom file sink path", expected = "Test storage is initialized and file sink path is set"), @Step(value = "Create broker and controller pools", expected = "Resources are created and are in ready state"), @Step(value = "Create Kafka cluster", expected = "Kafka cluster is ready with 3 brokers"), @Step(value = "Create ConfigMap for connector configuration", expected = "ConfigMap with connector configuration is created"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 4aa75a6a005..45678709b91 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -68,23 +68,6 @@ public class KafkaNodePoolST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(KafkaNodePoolST.class); - /** - * @description This test case verifies the management of broker IDs in Kafka Node Pools using annotations. - * - * @steps - * 1. - Deploy a Kafka instance with annotations to manage Node Pools and Initial NodePool (Initial) to hold Topics and act as controller. - * - Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91. - * 2. - Deploy additional 2 NodePools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]). - * - NodePools are deployed, NodePool A contains ID 4, NodePoolB contains Ids 6, 0. - * 3. 
- Annotate NodePool A 'next-node-ids' and NodePool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replica resp. - * - NodePools are scaled, NodePool A contains IDs 4, 20, 21, 1. NodePool B contains ID 0. - * 4. - Annotate NodePool A 'remove-node-ids' and NodePool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replica resp. - * - NodePools are scaled, NodePool A contains IDs 1, 4. NodePool B contains ID 2, 3, 5. - * - * @usecase - * - kafka-node-pool - * - broker-id-management - */ @ParallelNamespaceTest @TestDoc( description = @Desc("This test case verifies the management of broker IDs in Kafka Node Pools using annotations."), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java index be732bc9552..a9f9428664d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java @@ -49,16 +49,6 @@ public class KafkaVersionsST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(KafkaVersionsST.class); - /** - * Test checking basic functionality for each supported Kafka version. - * Ensures that for every Kafka version: - * - Kafka cluster is deployed without an issue - * - with Topic Operator, User Operator, 3 Zookeeper and Kafka pods - * - Topic Operator is working - because of the KafkaTopic creation - * - User Operator is working - because of SCRAM-SHA, ACLs and overall KafkaUser creations - * - Sending and receiving messages is working to PLAIN (with SCRAM-SHA) and TLS listeners - * @param testKafkaVersion TestKafkaVersion added for each iteration of the parametrized test - */ @ParameterizedTest(name = "Kafka version: {0}.version()") @MethodSource("io.strimzi.systemtest.utils.TestKafkaVersion#getSupportedKafkaVersions") @TestDoc( diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java index 6b9ee9b3d8c..e6d2679fab6 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java @@ -55,9 +55,6 @@ public class QuotasST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(QuotasST.class); - /** - * Test to check Kafka Quotas Plugin for disk space - */ @ParallelNamespaceTest @TestDoc( description = @Desc("Test to check Kafka Quotas Plugin for disk space."), @@ -154,7 +151,7 @@ void testKafkaQuotasPluginIntegration() { @TestDoc( description = @Desc("Test verifying bandwidth limitations with Kafka quotas plugin."), steps = { - @Step(value = "Create test storage and set excluded principal", expected = "Test storage is created and excluded principal is set"), + @Step(value = "Set excluded principal", expected = "Principal is set"), @Step(value = "Create Kafka resources including node pools and persistent Kafka with quotas enabled", expected = "Kafka resources are created successfully with quotas setup"), @Step(value = "Create Kafka topic and user with SCRAM-SHA authentication", expected = "Kafka topic and SCRAM-SHA user are created successfully"), @Step(value = "Send messages with normal user", expected = "Messages are sent and duration is measured"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java 
index e2a3e8fe8d8..a99900e4797 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java @@ -46,19 +46,6 @@ import static io.strimzi.systemtest.TestTags.REGRESSION; import static io.strimzi.systemtest.TestTags.TIERED_STORAGE; -/** - * @description This test suite covers scenarios for Tiered Storage integration implemented within Strimzi. - * - * @steps - * 1. - Create test namespace - * 2. - Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build) - * 3. - Deploy Minio in test namespace and init the client inside the Minio pod - * 4. - Init bucket in Minio for purposes of these tests - * 5. - Deploy Strimzi Cluster Operator - * - * @usecase - * - tiered-storage-integration - */ @MicroShiftNotSupported("We are using Kaniko and OpenShift builds to build Kafka image with TS. To make it working on Microshift we will invest much time with not much additional value.") @Tag(REGRESSION) @Tag(TIERED_STORAGE) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index 9f160147ebb..29b9e74ed8a 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -141,7 +141,6 @@ public class ListenersST extends AbstractST { @TestDoc( description = @Desc("Test sending messages over plain transport, without auth"), steps = { - @Step(value = "Create test storage instance", expected = "Instance of TestStorage is created"), @Step(value = "Create Kafka resources with wait", expected = "Kafka broker, controller, and topic are created"), @Step(value = "Log transmission message", expected = "Transmission message is logged"), @Step(value = "Produce and consume messages with plain clients", expected = "Messages are successfully produced and consumed"), @@ -181,7 +180,6 @@ void testSendMessagesPlainAnonymous() { @TestDoc( description = @Desc("Test sending messages over tls transport using mutual tls auth."), steps = { - @Step(value = "Create TestStorage instance", expected = "TestStorage object is created"), @Step(value = "Create Kafka node pool resources", expected = "Persistent storage node pools are created"), @Step(value = "Disable plain listener and enable tls listener in Kafka resource", expected = "Kafka with plain listener disabled and tls listener enabled is created"), @Step(value = "Create Kafka topic and user", expected = "Kafka topic and tls user are created"), @@ -260,7 +258,6 @@ void testSendMessagesTlsAuthenticated() { @TestDoc( description = @Desc("Test sending messages over plain transport using scram sha auth."), steps = { - @Step(value = "Initialize test storage and resources", expected = "Test storage and resources are initialized"), @Step(value = "Create Kafka brokers and controllers", expected = "Kafka brokers and controllers are created"), @Step(value = "Enable Kafka with plain listener disabled and scram sha auth", expected = "Kafka instance with scram sha auth is enabled on a specified listener"), @Step(value = "Set up topic and user", expected = "Kafka topic and Kafka user are set up with scram sha auth credentials"), @@ -338,7 +335,6 @@ void testSendMessagesPlainScramSha() { @TestDoc( description = @Desc("Test sending messages over TLS transport using SCRAM-SHA authentication."), 
steps = { - @Step(value = "Initialize test storage with test context", expected = "Test storage is initialized"), @Step(value = "Create resources for Kafka node pools", expected = "Kafka node pools are created"), @Step(value = "Create Kafka cluster with SCRAM-SHA-512 authentication", expected = "Kafka cluster is created with SCRAM-SHA authentication"), @Step(value = "Create Kafka topic and user", expected = "Kafka topic and user are created"), @@ -421,7 +417,6 @@ void testSendMessagesTlsScramSha() { @TestDoc( description = @Desc("Test custom listener configured with scram SHA authentication and TLS."), steps = { - @Step(value = "Initialize test storage and resource manager", expected = "Resources are initialized successfully"), @Step(value = "Create a Kafka cluster with broker and controller node pools", expected = "Kafka cluster is created with node pools"), @Step(value = "Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication", expected = "Kafka cluster with custom listener is ready"), @Step(value = "Create a Kafka topic and SCRAM-SHA user", expected = "Kafka topic and user are created"), @@ -482,7 +477,6 @@ void testSendMessagesCustomListenerTlsScramSha() { @TestDoc( description = @Desc("Test checking the functionality of Kafka cluster with NodePort external listener configurations."), steps = { - @Step(value = "Initialize TestStorage object and set labels and annotations", expected = "TestStorage object is initialized and labels and annotations are set"), @Step(value = "Create resource with Kafka broker pool and controller pool", expected = "Resources with Kafka pools are created successfully"), @Step(value = "Create Kafka cluster with NodePort and TLS listeners", expected = "Kafka cluster is set up with the specified listeners"), @Step(value = "Create ExternalKafkaClient and verify message production and consumption", expected = "Messages are produced and consumed successfully"), @@ -598,7 +592,6 @@ void testNodePort() { @TestDoc( description = @Desc("Test verifying that NodePort configuration can be overridden for Kafka brokers and bootstrap service."), steps = { - @Step(value = "Initialize TestStorage instance and define broker and bootstrap NodePort values.", expected = "TestStorage instance is created and NodePort values are initialized."), @Step(value = "Create Kafka broker and controller pools using resource manager.", expected = "Kafka broker and controller pools are created successfully."), @Step(value = "Deploy Kafka cluster with overridden NodePort configuration for brokers and bootstrap.", expected = "Kafka cluster is deployed with specified NodePort values."), @Step(value = "Verify that the bootstrap service NodePort matches the configured value.", expected = "Bootstrap NodePort matches the configured value of 32100."), @@ -679,7 +672,6 @@ void testOverrideNodePortConfiguration() { @TestDoc( description = @Desc("Test the NodePort TLS functionality for Kafka brokers in a Kubernetes environment."), steps = { - @Step(value = "Initialize test storage with the test context", expected = "Test storage is initialized with the test context"), @Step(value = "Create Kafka broker and controller node pools", expected = "Broker and controller node pools are created"), @Step(value = "Deploy Kafka cluster with NodePort listener and TLS enabled", expected = "Kafka cluster is deployed with NodePort listener and TLS"), @Step(value = "Create a Kafka topic", expected = "Kafka topic is created"), @@ -860,7 +852,6 @@ void testLoadBalancerTls() { @TestDoc( description 
= @Desc("Test verifies the functionality of Kafka with a cluster IP listener."), steps = { - @Step(value = "Initialize the test storage and resource manager", expected = "Test storage and resource manager are initialized"), @Step(value = "Create the Kafka broker and controller pools", expected = "Kafka broker and controller pools are created"), @Step(value = "Create the Kafka cluster with a cluster IP listener", expected = "Kafka cluster with cluster IP listener is created"), @Step(value = "Retrieve the cluster IP bootstrap address", expected = "Cluster IP bootstrap address is correctly retrieved"), @@ -904,7 +895,6 @@ void testClusterIp() { @TestDoc( description = @Desc("This test validates the creation of Kafka resources with TLS authentication, ensuring proper setup and functionality of the Kafka cluster in a parallel namespace."), steps = { - @Step(value = "Create TestStorage instance and initialize resource manager with broker and controller node pools", expected = "Broker and controller node pools are set up successfully"), @Step(value = "Create ephemeral Kafka cluster with TLS enabled on ClusterIP listener", expected = "Kafka cluster is created with TLS enabled listener on port 9103"), @Step(value = "Create Kafka user with TLS authentication", expected = "Kafka user is created successfully"), @Step(value = "Retrieve the ClusterIP bootstrap address for the Kafka cluster", expected = "Bootstrap address for the Kafka cluster is retrieved"), @@ -1065,7 +1055,6 @@ void testCustomSoloCertificatesForNodePort() { @TestDoc( description = @Desc("Test verifies the custom chain certificates configuration for Kafka NodePort listener."), steps = { - @Step(value = "Initialize test storage.", expected = "Test storage is initialized."), @Step(value = "Generate custom root CA and intermediate certificates.", expected = "Root and intermediate certificates are generated."), @Step(value = "Generate end entity certificates using intermediate CA.", expected = "End entity certificates are generated."), @Step(value = "Export certificates to PEM files.", expected = "Certificates are exported to PEM files."), @@ -1175,7 +1164,6 @@ void testCustomChainCertificatesForNodePort() { @TestDoc( description = @Desc("Test verifying custom solo certificates for load balancer in a Kafka cluster."), steps = { - @Step(value = "Initialize test storage", expected = "Test storage is initialized with the current test context"), @Step(value = "Create custom secret", expected = "Custom secret is created with the specified certificate and key"), @Step(value = "Create Kafka resources with node pools", expected = "Kafka brokers and controller pools are created and configured"), @Step(value = "Create Kafka cluster with listeners", expected = "Kafka cluster is created with internal and load balancer listeners using the custom certificates"), @@ -1489,7 +1477,6 @@ void testCustomSoloCertificatesForRoute() { @TestDoc( description = @Desc("Test to verify custom chain certificates for a Kafka Route."), steps = { - @Step(value = "Initialize TestStorage", expected = "TestStorage instance is created and associated resources are initialized"), @Step(value = "Generate root and intermediate certificates", expected = "Root and intermediate CA keys are generated"), @Step(value = "Create cluster custom certificate chain and root CA secrets", expected = "Custom certificate chain and root CA secrets are created in OpenShift"), @Step(value = "Create Kafka cluster with custom certificates", expected = "Kafka cluster is deployed with custom 
certificates for internal and external listeners"), @@ -1599,7 +1586,6 @@ void testCustomChainCertificatesForRoute() { @TestDoc( description = @Desc("This test verifies the behavior of Kafka with custom certificates for load balancer and TLS rolling updates."), steps = { - @Step(value = "Initialize test storage", expected = "Test storage initialized with context"), @Step(value = "Create custom secrets for Kafka clusters", expected = "Secrets created and available in namespace"), @Step(value = "Deploy Kafka resources with load balancer and internal TLS listener", expected = "Kafka resources deployed with respective configurations"), @Step(value = "Create Kafka user and retrieve certificates", expected = "Kafka user created and certificates retrieved from Kafka status and secrets"), @@ -2418,7 +2404,6 @@ void testCustomCertRouteAndTlsRollingUpdate() { @TestDoc( description = @Desc("Test for verifying non-existing custom certificate handling by creating necessary resources and ensuring correct error message check."), steps = { - @Step(value = "Initialize TestStorage object", expected = "TestStorage instance is created"), @Step(value = "Create necessary Kafka node pools", expected = "Kafka node pools are created and initialized"), @Step(value = "Create Kafka cluster with a listener using non-existing certificate", expected = "Kafka cluster resource is initialized with non-existing TLS certificate"), @Step(value = "Wait for pods to be ready if not in KRaft mode", expected = "Pods are ready"), @@ -2469,7 +2454,6 @@ void testNonExistingCustomCertificate() { @TestDoc( description = @Desc("Test checking behavior when Kafka is configured with a non-existing certificate in the TLS listener."), steps = { - @Step(value = "Initialize TestStorage object with test context.", expected = "TestStorage object is initialized."), @Step(value = "Define non-existing certificate name.", expected = "Non-existing certificate name is defined."), @Step(value = "Create a custom secret for Kafka with the defined certificate.", expected = "Custom secret created successfully."), @Step(value = "Create Kafka node pools resources.", expected = "Kafka node pools resources created."), @@ -2526,7 +2510,6 @@ void testCertificateWithNonExistingDataCrt() { @TestDoc( description = @Desc("Test verifies that a Kafka cluster correctly identifies and reports the absence of a specified custom certificate private key."), steps = { - @Step(value = "Initialize test storage.", expected = "Test storage is initialized with the current test context."), @Step(value = "Define the non-existing certificate key.", expected = "The non-existing certificate key string is defined."), @Step(value = "Create a custom secret with a certificate for Kafka server.", expected = "Custom secret is created in the namespace."), @Step(value = "Create broker and controller resources with node pools.", expected = "Resources are created and ready."), @@ -2583,7 +2566,6 @@ void testCertificateWithNonExistingDataKey() { @TestDoc( description = @Desc("Validates that messages can be sent and received over TLS with SCRAM-SHA authentication using a predefined password, and that the password can be updated and still be functional."), steps = { - @Step(value = "Create a test storage instance", expected = "Test storage is created"), @Step(value = "Create and encode the initial password", expected = "Initial password is encoded"), @Step(value = "Create and encode the secondary password", expected = "Secondary password is encoded"), @Step(value = "Create a secret in Kubernetes 
with the initial password", expected = "Secret is created and contains the initial password"), @@ -2692,7 +2674,6 @@ void testMessagesTlsScramShaWithPredefinedPassword() { @TestDoc( description = @Desc("Verify that advertised hostnames appear correctly in broker certificates."), steps = { - @Step(value = "Initialize TestStorage object", expected = "TestStorage object is initialized"), @Step(value = "Define internal and external advertised hostnames and ports", expected = "Hostnames and ports are defined and listed"), @Step(value = "Create broker configurations with advertised hostnames and ports", expected = "Broker configurations are created"), @Step(value = "Deploy resources with Wait function and create Kafka instance", expected = "Resources and Kafka instance are successfully created"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java index 415b030c514..ddcc257a801 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java @@ -56,9 +56,6 @@ @Tag(REGRESSION) @SuiteDoc( description = @Desc("Test to verify the functionality of using multiple NodePorts in a Kafka cluster within the same namespace."), - beforeTestSteps = { - @Step(value = "Initialize TestStorage with current test context", expected = "TestStorage object is initialized successfully") - }, labels = { @Label(value = TestDocsLabels.KAFKA) } @@ -77,7 +74,6 @@ public class MultipleListenersST extends AbstractST { @TestDoc( description = @Desc("Test verifying the functionality of using multiple NodePorts in a Kafka cluster within the same namespace."), steps = { - @Step(value = "Initialize TestStorage with current test context", expected = "TestStorage object is initialized successfully"), @Step(value = "Execute listener tests with NodePort configuration", expected = "Listener tests run without issues using NodePort") }, labels = { @@ -93,7 +89,6 @@ void testMultipleNodePorts() { @TestDoc( description = @Desc("Test to verify the usage of more than one Kafka cluster within a single namespace."), steps = { - @Step(value = "Initialize TestStorage with the test context", expected = "TestStorage instance is created"), @Step(value = "Run the internal Kafka listeners test", expected = "Listeners test runs successfully on the specified cluster") }, labels = { @@ -113,7 +108,6 @@ void testMultipleInternal() { description = @Desc("Test verifying the combination of internal and external Kafka listeners."), steps = { @Step(value = "Check if the environment supports cluster-wide NodePort rights", expected = "Test is skipped if the environment is not suitable"), - @Step(value = "Initialize test storage with context", expected = "Test storage is successfully initialized"), @Step(value = "Retrieve and combine internal and NodePort listeners", expected = "Listeners are successfully retrieved and combined"), @Step(value = "Run listeners test with combined listeners", expected = "Listeners test is executed successfully") }, @@ -144,7 +138,6 @@ void testCombinationOfInternalAndExternalListeners() { @TestDoc( description = @Desc("Test verifying the behavior of multiple load balancers in a single namespace using more than one Kafka cluster."), steps = { - @Step(value = "Initialize TestStorage instance", expected = "TestStorage instance is created with proper context"), 
@Step(value = "Run listeners test with LOADBALANCER type", expected = "Listeners test executes successfully with load balancers"), @Step(value = "Validate the results", expected = "Results match the expected outcomes for multiple load balancers") }, @@ -164,7 +157,6 @@ void testMultipleLoadBalancers() { @TestDoc( description = @Desc("Test to verify the functionality of multiple Kafka route listeners in a single namespace."), steps = { - @Step(value = "Initialize the TestStorage object with the current test context", expected = "TestStorage object is created successfully"), @Step(value = "Retrieve test cases for Kafka Listener Type ROUTE", expected = "Test cases for ROUTE are retrieved"), @Step(value = "Run listener tests using the retrieved test cases and cluster name", expected = "Listener tests run successfully with no errors") }, @@ -184,7 +176,6 @@ void testMultipleRoutes() { @TestDoc( description = @Desc("Test ensuring that different types of external Kafka listeners (ROUTE and NODEPORT) work correctly when mixed."), steps = { - @Step(value = "Initialize TestStorage", expected = "TestStorage is initialized with TestContext"), @Step(value = "Retrieve route listeners", expected = "Route listeners are retrieved from test cases"), @Step(value = "Retrieve nodeport listeners", expected = "Nodeport listeners are retrieved from test cases"), @Step(value = "Combine route and nodeport listeners", expected = "Multiple different listeners list is populated"), @@ -216,7 +207,6 @@ void testMixtureOfExternalListeners() { @TestDoc( description = @Desc("Verifies the combination of every kind of Kafka listener: INTERNAL, NODEPORT, ROUTE, and LOADBALANCER."), steps = { - @Step(value = "Create test storage instance", expected = "Test storage instance created with current test context"), @Step(value = "Retrieve different types of Kafka listeners", expected = "Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved"), @Step(value = "Combine all different listener lists", expected = "A combined list of all Kafka listener types is created"), @Step(value = "Run listeners test with combined listener list", expected = "Listeners test runs with all types of Kafka listeners in the combined list") From 01f5aa1d8b52bc8ae149f36c104f724109356187 Mon Sep 17 00:00:00 2001 From: see-quick Date: Mon, 30 Sep 2024 10:06:13 +0200 Subject: [PATCH 03/12] add Kafka docs Signed-off-by: see-quick --- ...rimzi.systemtest.kafka.ConfigProviderST.md | 37 ++ ...trimzi.systemtest.kafka.KafkaNodePoolST.md | 97 ++++ .../io.strimzi.systemtest.kafka.KafkaST.md | 216 +++++++ ...trimzi.systemtest.kafka.KafkaVersionsST.md | 34 ++ .../io.strimzi.systemtest.kafka.QuotasST.md | 55 ++ ...trimzi.systemtest.kafka.TieredStorageST.md | 38 ++ ...afka.dynamicconfiguration.DynamicConfST.md | 76 +++ ...ynamicconfiguration.DynamicConfSharedST.md | 18 + ....systemtest.kafka.listeners.ListenersST.md | 548 ++++++++++++++++++ ...est.kafka.listeners.MultipleListenersST.md | 123 ++++ development-docs/systemtests/labels/kafka.md | 61 +- 11 files changed, 1302 insertions(+), 1 deletion(-) create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md 
create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md create mode 100644 development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md new file mode 100644 index 00000000000..c8e2323a5ea --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md @@ -0,0 +1,37 @@ +# ConfigProviderST + +**Description:** Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy cluster operator across all namespaces, with custom configuration | Cluster operator is deployed | + +**Labels:** + +* [kafka](labels/kafka.md) + +
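For orientation, the sketch below (not taken from the test code) shows the kind of Kafka Connect worker options this suite relies on: it registers Strimzi's Kubernetes ConfigMap provider and Kafka's environment-variable provider under two aliases, and shows how a connector option can reference them. The provider aliases, the ConfigMap coordinates, and the environment variable name are illustrative assumptions.

```java
import java.util.Map;

// A minimal, self-contained sketch of the Connect worker configuration that enables the two
// config providers exercised by this suite. Provider aliases and referenced names are assumptions.
public class ConfigProviderSketch {
    public static void main(String[] args) {
        Map<String, String> connectConfig = Map.of(
            // register the providers under arbitrary aliases
            "config.providers", "configmaps,env",
            // Strimzi's Kubernetes ConfigMap config provider
            "config.providers.configmaps.class", "io.strimzi.kafka.KubernetesConfigMapConfigProvider",
            // Kafka's environment-variable config provider
            "config.providers.env.class", "org.apache.kafka.common.config.provider.EnvVarConfigProvider"
        );

        // A connector option can then reference externalized values indirectly, for example:
        //   topics = ${configmaps:my-namespace/my-connector-config:topics}   (hypothetical ConfigMap and key)
        //   file   = ${env:FILE_SINK_FILE}                                   (hypothetical environment variable)
        connectConfig.forEach((key, value) -> System.out.println(key + " = " + value));
    }
}
```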
+ +## testConnectWithConnectorUsingConfigAndEnvProvider + +**Description:** Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create broker and controller pools | Resources are created and are in ready state | +| 2. | Create Kafka cluster | Kafka cluster is ready with 3 brokers | +| 3. | Create ConfigMap for connector configuration | ConfigMap with connector configuration is created | +| 4. | Deploy Kafka Connect with external configuration | Kafka Connect is deployed with proper configuration | +| 5. | Create necessary Role and RoleBinding for connector | Role and RoleBinding are created and applied | +| 6. | Deploy Kafka connector | Kafka connector is successfully deployed | +| 7. | Deploy Kafka clients | Kafka clients are deployed and ready | +| 8. | Send messages and verify they are written to file sink | Messages are successfully written to the specified file sink | + +**Labels:** + +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md new file mode 100644 index 00000000000..9f221f85914 --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -0,0 +1,97 @@ +# KafkaNodePoolST + +**Description:** This test suite verifies various functionalities of Kafka Node Pools in a Kafka cluster. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Ensure the environment is not using OLM or Helm and Kafka Node Pools are enabled | Environment is validated | +| 2. | Install the default cluster operator | Cluster operator is installed | + +**Labels:** + +* [kafka](labels/kafka.md) + +
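As background for the scenarios below: a KafkaNodePool is a separate custom resource tied to its Kafka cluster by the `strimzi.io/cluster` label, and broker IDs can be steered with the `strimzi.io/next-node-ids` and `strimzi.io/remove-node-ids` annotations. The following is a minimal sketch using the builders generated for the Strimzi API model; the pool name, namespace, sizes, and exact fluent method names are assumptions and may differ between Strimzi versions.

```java
import java.util.Map;

import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder;
import io.strimzi.api.kafka.model.nodepool.KafkaNodePool;
import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder;
import io.strimzi.api.kafka.model.nodepool.ProcessRoles;

public class NodePoolSketch {

    // Hypothetical broker-only pool; names and sizes are placeholders.
    public static KafkaNodePool brokerPool() {
        return new KafkaNodePoolBuilder()
            .withNewMetadata()
                .withName("pool-a")
                .withNamespace("my-namespace")
                // ties the pool to the Kafka cluster named "my-cluster"
                .withLabels(Map.of("strimzi.io/cluster", "my-cluster"))
                // asks the operator to use ID 4 for the next broker added to this pool
                .withAnnotations(Map.of("strimzi.io/next-node-ids", "[4]"))
            .endMetadata()
            .withNewSpec()
                .withReplicas(1)
                .withRoles(ProcessRoles.BROKER)
                .withStorage(new PersistentClaimStorageBuilder()
                    .withSize("10Gi")
                    .withDeleteClaim(true)
                    .build())
            .endSpec()
            .build();
    }
}
```

Scaling a pool down with a `strimzi.io/remove-node-ids` annotation works analogously, which is what the broker-ID management test below exercises.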
+
+## testKafkaManagementTransferToAndFromKafkaNodePool
+
+**Description:** This test case verifies the transfer of a Kafka cluster to and from management by KafkaNodePool, by creating the corresponding Kafka and KafkaNodePool custom resources and manipulating the corresponding Kafka annotation.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Kafka annotated to enable management by KafkaNodePool, and a KafkaNodePool targeting the given Kafka cluster. | Kafka is deployed, KafkaNodePool custom resource is targeting the Kafka cluster as expected. |
+| 2. | Modify KafkaNodePool by increasing the number of Kafka replicas. | Number of Kafka Pods is increased to match the specification from KafkaNodePool. |
+| 3. | Produce and consume messages in the given Kafka cluster. | Clients can produce and consume messages. |
+| 4. | Modify the Kafka custom resource annotation strimzi.io/node-pool to disable management by KafkaNodePool. | StrimziPodSet is modified, replacing the former one, Pods are replaced and the specification from KafkaNodePool (i.e., changed replica count) is ignored. |
+| 5. | Produce and consume messages in the given Kafka cluster. | Clients can produce and consume messages. |
+| 6. | Modify the Kafka custom resource annotation strimzi.io/node-pool to enable management by KafkaNodePool. | New StrimziPodSet is created, replacing the former one, Pods are replaced and the specification from KafkaNodePool (i.e., changed replica count) has priority over the Kafka specification. |
+| 7. | Produce and consume messages in the given Kafka cluster. | Clients can produce and consume messages. |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
+## testKafkaNodePoolBrokerIdsManagementUsingAnnotations
+
+**Description:** This test case verifies the management of broker IDs in Kafka Node Pools using annotations.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy a Kafka instance with annotations to manage Node Pools and an initial NodePool (Initial) to hold Topics and act as controller. | Kafka instance is deployed according to the Kafka and KafkaNodePool custom resources, with IDs 90, 91. |
+| 2. | Deploy 2 additional NodePools (A, B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding the values [4] and [6], respectively. | NodePools are deployed, NodePool A contains ID 4, NodePool B contains IDs 6, 0. |
+| 3. | Annotate NodePool A 'next-node-ids' and NodePool B 'remove-node-ids' with [20-21] and [6,55], respectively, afterwards scale to 4 and 1 replicas, respectively. | NodePools are scaled, NodePool A contains IDs 4, 20, 21, 1. NodePool B contains ID 0. |
+| 4. | Annotate NodePool A 'remove-node-ids' and NodePool B 'next-node-ids' with [20] and [1], respectively, afterwards scale to 2 and 6 replicas, respectively. | NodePools are scaled, NodePool A contains IDs 1, 4. NodePool B contains IDs 2, 3, 5. |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
+## testNodePoolsAdditionAndRemoval
+
+**Description:** This test case verifies the possibility of adding Kafka Node Pools to, and removing them from, an existing Kafka cluster.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy a Kafka instance with annotations to manage Node Pools and 2 initial NodePools. | Kafka instance is deployed according to the Kafka and KafkaNodePool custom resources. |
+| 2. | Create KafkaTopic with a replica count requiring all Kafka brokers to be present, deploy clients, transmit messages, and remove the KafkaTopic. | Transmission of messages finishes successfully, KafkaTopic is created and cleaned up as expected. |
+| 3. | Add an extra KafkaNodePool with the broker role to the Kafka cluster. | KafkaNodePool is deployed and ready. |
+| 4. | Create KafkaTopic with a replica count requiring all Kafka brokers to be present, deploy clients, transmit messages, and remove the KafkaTopic. | Transmission of messages finishes successfully, KafkaTopic is created and cleaned up as expected. |
+| 5. | Remove one of the KafkaNodePools with the broker role. | KafkaNodePool is removed, its Pods are deleted, but the other Pods in Kafka are stable and ready. |
+| 6. | Create KafkaTopic with a replica count requiring all the remaining Kafka brokers to be present, deploy clients, transmit messages, and remove the KafkaTopic. | Transmission of messages finishes successfully, KafkaTopic is created and cleaned up as expected. |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
+## testNodePoolsRolesChanging
+
+**Description:** This test case verifies changing the roles of Kafka Node Pools.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy a Kafka instance with annotations to manage Node Pools and 2 initial NodePools, both with mixed roles; the first one stays stable, the second one will be modified. | Kafka instance with the initial Node Pools is deployed. |
+| 2. | Create KafkaTopic with a replica count requiring all Kafka brokers to be present. | KafkaTopic is created. |
+| 3. | Annotate one of the Node Pools to perform a manual Rolling Update. | Rolling Update is started. |
+| 4. | Change the role of the Kafka Node Pool from mixed to controller-only. | Role change is prevented due to existing KafkaTopic replicas and the ongoing Rolling Update. |
+| 5. | Original Rolling Update finishes successfully. | Rolling Update is completed. |
+| 6. | Delete the previously created KafkaTopic. | KafkaTopic is deleted and the Node Pool role change is initiated. |
+| 7. | Change the role of the Kafka Node Pool from controller-only to mixed. | Kafka Node Pool changes its role to the mixed role. |
+| 8. | Produce and consume messages on a newly created KafkaTopic with a replica count requiring the new brokers to be present as well. | Messages are produced and consumed successfully. |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md
new file mode 100644
index 00000000000..2e60b529384
--- /dev/null
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md
@@ -0,0 +1,216 @@
+# KafkaST
+
+**Description:** Suite containing Kafka-related tests (e.g., JVM resources, EO, TO, or UO removal from the Kafka cluster), which ensure proper functioning of Kafka clusters.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy cluster operator across all namespaces, with custom configuration | Cluster operator is deployed |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
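To make the JVM and resources part of the suite description concrete, here is a rough sketch of how heap options and container resources can be declared on the Kafka custom resource with the generated builders, as exercised by the JVM/resources test below. Listener, storage, and replica configuration are omitted, and the concrete values as well as the exact fluent method names are assumptions, so treat this as an outline rather than a deployable resource.

```java
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.strimzi.api.kafka.model.kafka.Kafka;
import io.strimzi.api.kafka.model.kafka.KafkaBuilder;

public class JvmAndResourcesSketch {

    // Hypothetical cluster definition; only the JVM and resources parts are of interest here.
    public static Kafka kafkaWithJvmAndResources() {
        return new KafkaBuilder()
            .withNewMetadata()
                .withName("my-cluster")
                .withNamespace("my-namespace")
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    // heap settings expected to be propagated into the broker process
                    .withNewJvmOptions()
                        .withXms("512m")
                        .withXmx("512m")
                    .endJvmOptions()
                    // requests and limits expected to appear on the Kafka container
                    .withResources(new ResourceRequirementsBuilder()
                        .addToRequests("memory", new Quantity("1Gi"))
                        .addToRequests("cpu", new Quantity("500m"))
                        .addToLimits("memory", new Quantity("1Gi"))
                        .addToLimits("cpu", new Quantity("1"))
                        .build())
                .endKafka()
                .withNewEntityOperator()
                    .withNewTopicOperator().endTopicOperator()
                    .withNewUserOperator().endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build();
    }
}
```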
+ +## testAdditionalVolumes + +**Description:** This test validates the mounting and usage of additional volumes for Kafka, Kafka Connect, and Kafka Bridge components. It tests whether secret and config map volumes are correctly created, mounted, and accessible across various deployments. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Setup environment prerequisites and configure test storage. | Ensure the environment is in KRaft mode. | +| 2. | Create necessary Kafka resources with additional volumes for secrets and config maps. | Resources are correctly instantiated with specified volumes. | +| 3. | Deploy Kafka, Kafka Connect, and Kafka Bridge with these volumes. | Components are correctly configured with additional volumes. | +| 4. | Verify that all pods (Kafka, Connect, and Bridge) have additional volumes mounted and accessible. | Volumes are correctly mounted and usable within pods. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testDeployUnsupportedKafka + +**Description:** Test to ensure that deploying Kafka with an unsupported version results in the expected error. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Initialize test storage with current context | Test storage is initialized | +| 2. | Create Kafka node pools | Kafka node pools are created and ready | +| 3. | Deploy Kafka with a non-existing version | Kafka deployment with non-supported version begins | +| 4. | Log Kafka deployment process | Log entry for Kafka deployment is created | +| 5. | Wait for Kafka to not be ready | Kafka is not ready as expected | +| 6. | Verify Kafka status message for unsupported version | Error message for unsupported version is found in Kafka status | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testJvmAndResources + +**Description:** This test case verifies that Pod's resources (limits and requests), custom JVM configurations, and expected Java configuration are propagated correctly to Pods, containers, and processes. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration | Kafka and its components (ZooKeeper, Entity Operator) are deployed | +| 2. | For each of components (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables | Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification. | +| 3. | Wait for a time to observe that none of initiated components needed Rolling Update | All of Kafka components remained in stable state. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testKRaftMode + +**Description:** This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy Kafka annotated to enable KRaft (and additionally annotated to enable management by KafkaNodePool due to default usage of NodePools), and KafkaNodePool targeting given Kafka Cluster. | Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected. | +| 2. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | +| 3. | Trigger manual Rolling Update. | Rolling update is triggered and completed shortly after. 
| 
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
+## testKafkaJBODDeleteClaimsTrueFalse
+
+**Description:** This test case verifies Kafka with persistent JBOD storage and the 'delete claim' property of the JBOD volumes.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Kafka with persistent JBOD storage with 2 volumes, both configured to delete their Persistent Volume Claims when the Kafka cluster is un-provisioned. | Kafka is deployed, volumes are labeled and linked to Pods correctly. |
+| 2. | Verify that labels in Persistent Volume Claims are set correctly. | Persistent Volume Claims do contain the expected labels and values. |
+| 3. | Modify the Kafka Custom Resource, specifically the 'delete claim' property of its first Kafka volume. | Kafka CR is successfully modified, the annotation of the corresponding Persistent Volume Claim is afterwards changed by the Cluster Operator. |
+| 4. | Delete the Kafka cluster. | Kafka cluster and its components are deleted, including the Persistent Volume Claim of the volume with the 'delete claim' property set to true. |
+| 5. | Verify the remaining Persistent Volume Claims. | Persistent Volume Claim referenced by the volume of the formerly deleted Kafka Custom Resource with the 'delete claim' property set to false is still present. |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
+## testLabelsExistenceAndManipulation
+
+**Description:** This test case verifies the presence of the expected Strimzi-specific labels, as well as labels and annotations specified by the user. Some of the user-specified labels are later modified (a new one is added, an existing one is changed), which triggers a rolling update, after which all changes take place as expected.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotations in PVC metadata | Kafka is deployed with its default labels and all others specified by the user. |
+| 2. | Deploy Producer and Consumer configured to produce and consume the default number of messages, to make sure Kafka works as expected | Producer and Consumer are able to produce and consume messages respectively. |
+| 3. | Modify the configuration of the Kafka CR by adding new labels and modifying existing ones | Kafka rolls and the new labels are present in the Kafka CR and the managed resources |
+| 4. | Deploy Producer and Consumer configured to produce and consume the default number of messages, to make sure Kafka works as expected | Producer and Consumer are able to produce and consume messages respectively. |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
+
+## testMessagesAndConsumerOffsetFilesOnDisk
+
+**Description:** This test case verifies correct storage of messages on disk, and their presence even after a rolling update of all Kafka Pods. The test case also checks that files related to the offsets topic are present.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy persistent Kafka with the corresponding configuration of the offsets topic. | Kafka is created with the expected configuration. |
+| 2. | Create KafkaTopic with the corresponding configuration. | KafkaTopic is created with the expected configuration. |
+| 3. | Execute command to check the presence of files related to the offsets topic. | Files related to the offsets topic are present. |
+| 4. | Produce the default number of messages to the already created topic. | Produced messages are present. |
+| 5. | Perform a rolling update on all Kafka Pods, in this case a single broker. 
| After rolling update is completed all messages are again present, as they were successfully stored on disk. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testReadOnlyRootFileSystem + +**Description:** This test case verifies that Kafka (with all its components, including Zookeeper, Entity Operator, KafkaExporter, CruiseControl) configured with 'withReadOnlyRootFilesystem' can be deployed and also works correctly. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy persistent Kafka with 3 Kafka and Zookeeper replicas, Entity Operator, CruiseControl, and KafkaExporter. Each component has configuration 'withReadOnlyRootFilesystem' set to true. | Kafka and its components are deployed. | +| 2. | Create Kafka producer and consumer. | Kafka clients are successfully created. | +| 3. | Produce and consume messages using created clients. | Messages are successfully sent and received. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testRegenerateCertExternalAddressChange + +**Description:** Test regenerates certificates after changing Kafka's external address. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create Kafka without external listener | Kafka instance is created without an external listener | +| 2. | Edit Kafka to include an external listener | External listener is correctly added to the Kafka instance | +| 3. | Wait until the Kafka component has rolled | Kafka component rolls successfully with the new external listener | +| 4. | Compare Kafka broker secrets before and after adding external listener | Secrets are different before and after adding the external listener | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testRemoveComponentsFromEntityOperator + +**Description:** This test case verifies the correct deployment of Entity Operator, i.e., including both User Operator and Topic Operator. Entity Operator is firstly modified to exclude User Operator, afterwards it is modified to default configuration, which includes User Operator. The next step is removal of Topic Operator itself and finally, also removing User Operator, with Topic Operator being already removed. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy Kafka with Entity Operator set. | Kafka is deployed, and Entity Operator consist of both Topic and User Operators | +| 2. | Remove User Operator from the Kafka specification | User Operator container is deleted | +| 3. | Set User Operator back in the Kafka specification | User Operator container is recreated | +| 4. | Remove Topic Operator from the Kafka specification | Topic Operator container is removed from Entity Operator | +| 5. | Remove User Operator from the Kafka specification | Entity Operator Pod is removed, as there are no other containers present. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testResizeJbodVolumes + +**Description:** This test verifies the functionality of resizing JBOD storage volumes on a Kafka cluster. It checks that the system can handle volume size changes and performs a rolling update to apply these changes. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy a Kafka cluster with JBOD storage and initial volume sizes. | Kafka cluster is operational. | +| 2. | Produce and consume messages continuously to simulate cluster activity. | Message traffic is consistent. | +| 3. | Increase the size of one of the JBOD volumes. | Volume size change is applied. | +| 4. | Verify that the updated volume size is reflected. 
| PVC reflects the new size. | +| 5. | Ensure continuous message production and consumption are unaffected during the update process. | Message flow continues without interruption. | + +**Labels:** + +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md new file mode 100644 index 00000000000..4255316d38b --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md @@ -0,0 +1,34 @@ +# KafkaVersionsST + +**Description:** Test checking basic functionality for each supported Kafka version. Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy cluster operator with default installation | Cluster operator is deployed | + +**Labels:** + +* [kafka](labels/kafka.md) + +
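The parametrized test below varies only the Kafka version in the cluster specification, keeping the rest of the definition identical across iterations. A minimal sketch of that edit with the generated builders follows; the version value is an example and has to be one of the versions supported by the Strimzi release under test.

```java
import io.strimzi.api.kafka.model.kafka.Kafka;
import io.strimzi.api.kafka.model.kafka.KafkaBuilder;

public class KafkaVersionSketch {

    // Returns a copy of the given Kafka resource with spec.kafka.version replaced.
    public static Kafka withVersion(Kafka kafka, String version) {
        return new KafkaBuilder(kafka)
            .editSpec()
                .editKafka()
                    .withVersion(version)   // e.g. "3.8.0" (example value, not necessarily supported)
                .endKafka()
            .endSpec()
            .build();
    }
}
```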
+ +## testKafkaWithVersion + +**Description:** Test checking basic functionality for each supported Kafka version. Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy Kafka cluster with specified version | Kafka cluster is deployed without any issue | +| 2. | Verify the Topic Operator creation | Topic Operator is working correctly | +| 3. | Verify the User Operator creation | User Operator is working correctly with SCRAM-SHA and ACLs | +| 4. | Send and receive messages via PLAIN with SCRAM-SHA | Messages are sent and received successfully | +| 5. | Send and receive messages via TLS | Messages are sent and received successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md new file mode 100644 index 00000000000..557f848691d --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md @@ -0,0 +1,55 @@ +# QuotasST + +**Description:** NOTE: STs in this class will not properly work on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. To properly run this suite, you should use cluster with proper storage. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Deploy default cluster operator with the required configurations | Cluster operator is deployed | + +**Labels:** + +* [kafka](labels/kafka.md) + +
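The bandwidth-limitation test below compares how long an ordinary SCRAM-SHA user and a quota-excluded user need to produce the same amount of data. A self-contained sketch of such a timing comparison with the plain Kafka producer API is shown here; the bootstrap address, listener security settings, topic name, and credentials are placeholders rather than values used by the suite.

```java
import java.time.Duration;
import java.time.Instant;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ProduceTimingSketch {

    // jaasConfig is expected in the usual form, e.g.:
    // org.apache.kafka.common.security.scram.ScramLoginModule required username="user" password="pass";
    static Duration timeProduce(String bootstrap, String jaasConfig, String topic, int messages) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        // SCRAM-SHA-512 over a non-TLS listener; the protocol and mechanism are assumptions
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "SCRAM-SHA-512");
        props.put("sasl.jaas.config", jaasConfig);

        byte[] payload = new byte[100_000]; // ~100 kB per record so throttling becomes visible
        Instant start = Instant.now();
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < messages; i++) {
                producer.send(new ProducerRecord<>(topic, payload));
            }
            producer.flush();
        }
        return Duration.between(start, Instant.now());
    }
}
```

Comparing the durations returned for the throttled and the excluded principal is then a plain assertion, which is essentially what the test performs.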
+ +## testKafkaQuotasPluginIntegration + +**Description:** Test to check Kafka Quotas Plugin for disk space. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Assume the cluster is not Minikube or MicroShift | Cluster is appropriate for the test | +| 2. | Create necessary resources for Kafka and nodes | Resources are created and Kafka is set up with quotas plugin | +| 3. | Send messages without any user; observe quota enforcement | Producer stops after reaching the minimum available bytes | +| 4. | Check Kafka logs for quota enforcement message | Kafka logs contain the expected quota enforcement message | +| 5. | Send messages with excluded user and observe the behavior | Messages are sent successfully without hitting the quota | +| 6. | Clean up resources | Resources are deleted successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testKafkaQuotasPluginWithBandwidthLimitation + +**Description:** Test verifying bandwidth limitations with Kafka quotas plugin. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Set excluded principal | Principal is set | +| 2. | Create Kafka resources including node pools and persistent Kafka with quotas enabled | Kafka resources are created successfully with quotas setup | +| 3. | Create Kafka topic and user with SCRAM-SHA authentication | Kafka topic and SCRAM-SHA user are created successfully | +| 4. | Send messages with normal user | Messages are sent and duration is measured | +| 5. | Send messages with excluded user | Messages are sent and duration is measured | +| 6. | Assert that time taken for normal user is greater than for excluded user | Assertion is successful | + +**Labels:** + +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md new file mode 100644 index 00000000000..886a1e32fe5 --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md @@ -0,0 +1,38 @@ +# TieredStorageST + +**Description:** This test suite covers scenarios for Tiered Storage integration implemented within Strimzi. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create test namespace | Namespace is created | +| 2. | Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build) | Kafka image is built | +| 3. | Deploy Minio in test namespace and init the client inside the Minio pod | Minio is deployed and client is initialized | +| 4. | Init bucket in Minio for purposes of these tests | Bucket is initialized in Minio | +| 5. | Deploy Strimzi Cluster Operator | Strimzi Cluster Operator is deployed | + +**Labels:** + +* [kafka](labels/kafka.md) + +
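For the tiered-storage scenario in the test below, the per-topic switches matter as much as the broker-side plugin: remote storage has to be enabled on the topic and the segment size kept small so that segments are offloaded quickly. A hedged sketch of such a KafkaTopic, assuming the generated KafkaTopic builders and standard Kafka topic-level config keys (names, package paths, and sizes are illustrative):

```java
import java.util.Map;

import io.strimzi.api.kafka.model.topic.KafkaTopic;
import io.strimzi.api.kafka.model.topic.KafkaTopicBuilder;

public class TieredStorageTopicSketch {

    // Hypothetical topic with tiered storage enabled; names and sizes are placeholders.
    public static KafkaTopic tieredTopic() {
        Map<String, Object> topicConfig = Map.of(
            "remote.storage.enable", "true",     // offload closed segments to the remote tier
            "segment.bytes", "10485760",         // ~10 MB segments so offloading happens quickly
            "local.retention.bytes", "10485760"  // keep only a small local tail on the broker disks
        );

        return new KafkaTopicBuilder()
            .withNewMetadata()
                .withName("tiered-topic")
                .withNamespace("my-namespace")
                .withLabels(Map.of("strimzi.io/cluster", "my-cluster"))
            .endMetadata()
            .withNewSpec()
                .withPartitions(3)
                .withReplicas(3)
                .withConfig(topicConfig)
            .endSpec()
            .build();
    }
}
```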
+
+## testTieredStorageWithAivenPlugin
+
+**Description:** This test case focuses on the Tiered Storage integration implemented within Strimzi. The tests use the Aiven Tiered Storage plugin - ...
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploys KafkaNodePool resource with a broker NodePool with a PV of size 10Gi | KafkaNodePool resource is deployed successfully with the specified configuration |
+| 2. | Deploys Kafka resource with the Tiered Storage configuration for the Aiven plugin, pointing to Minio S3, and with the image built in beforeAll | Kafka resource is deployed successfully with the Tiered Storage configuration |
+| 3. | Creates topic with Tiered Storage sync enabled and the segment size set to 10 MB (this is needed to speed up the sync) | Topic is created successfully with Tiered Storage enabled and a segment size of 10 MB |
+| 4. | Starts continuous producer to send data to Kafka | Continuous producer starts sending data to Kafka |
+| 5. | Wait until Minio storage is not empty (contains data from Kafka) | Minio contains data from Kafka |
+
+**Labels:**
+
+* [kafka](labels/kafka.md)
+
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md
new file mode 100644
index 00000000000..306da359b9d
--- /dev/null
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md
@@ -0,0 +1,76 @@
+# DynamicConfST
+
+**Description:** DynamicConfST is responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy the Cluster Operator | Cluster Operator is installed successfully |
+
+
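The dynamic-configuration tests below change broker options through `spec.kafka.config` on the Kafka custom resource and expect the operator to apply dynamically changeable options without rolling the brokers. A rough sketch of such an edit with the generated builders, using `unclean.leader.election.enable` as the example option (fluent method names assumed):

```java
import io.strimzi.api.kafka.model.kafka.Kafka;
import io.strimzi.api.kafka.model.kafka.KafkaBuilder;

public class DynamicConfigSketch {

    // Returns a copy of the given Kafka resource with one dynamically changeable broker option
    // flipped; applying it should not trigger a rolling update of the brokers.
    public static Kafka withUncleanLeaderElection(Kafka kafka, boolean enabled) {
        return new KafkaBuilder(kafka)
            .editSpec()
                .editKafka()
                    .addToConfig("unclean.leader.election.enable", enabled)
                .endKafka()
            .endSpec()
            .build();
    }
}
```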
+
+## testSimpleDynamicConfiguration
+
+**Description:** Test verifying dynamic configuration changes in a Kafka cluster, with multiple Kafka clusters in one namespace.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deep copy the shared Kafka configuration | Configuration map is duplicated with a deep copy |
+| 2. | Create resources with wait | Resources are created and ready |
+| 3. | Create scraper pod | Scraper pod is created |
+| 4. | Retrieve and verify Kafka configurations from ConfigMaps | Configurations meet expected values |
+| 5. | Retrieve Kafka broker configuration via CLI | Dynamic configurations are retrieved |
+| 6. | Update Kafka configuration for unclean leader election | Configuration is updated and verified for the dynamic property |
+| 7. | Verify updated Kafka configurations | Updated configurations are persistent and correct |
+
+**Labels:**
+
+* `dynamic-configuration` (description file doesn't exist)
+* [kafka](labels/kafka.md)
+
+
+## testUpdateToExternalListenerCausesRollingRestart
+
+**Description:** Ensures that updating to an external listener causes a rolling restart of the Kafka brokers.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create Kafka cluster with internal and external listeners. | Kafka cluster is created with the specified listeners. |
+| 2. | Verify initial configurations are correctly set in the broker. | Initial broker configurations are verified. |
+| 3. | Update Kafka cluster to change listener types. | Change in listener types triggers rolling update. |
+| 4. | Verify the rolling restart is successful. | All broker nodes successfully rolled and Kafka configuration updated. |
+
+**Labels:**
+
+* `dynamic-configuration` (description file doesn't exist)
+* [kafka](labels/kafka.md)
+
+
+## testUpdateToExternalListenerCausesRollingRestartUsingExternalClients
+
+**Description:** Test validating that updating Kafka cluster listeners to use external clients causes a rolling restart.
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Set up initial Kafka cluster and resources | Kafka cluster and resources are successfully created |
+| 2. | Create external Kafka clients and verify message production/consumption on plain listener | Messages are successfully produced and consumed using plain listener |
+| 3. | Attempt to produce/consume messages using TLS listener before update | Exception is thrown because the listener is plain |
+| 4. | Update Kafka cluster to use external TLS listener | Kafka cluster is updated and rolling restart occurs |
+| 5. | Verify message production/consumption using TLS listener after update | Messages are successfully produced and consumed using TLS listener |
+| 6. | Attempt to produce/consume messages using plain listener after TLS update | Exception is thrown because the listener is TLS |
+| 7. | Revert Kafka cluster listener to plain | Kafka cluster listener is reverted and rolling restart occurs |
+| 8. | Verify message production/consumption on plain listener after reverting | Messages are successfully produced and consumed using plain listener |
+| 9. 
| Attempt to produce/consume messages using TLS listener after reverting | Exception is thrown because the listener is plain | + +**Labels:** + +* `dynamic-configuration` (description file doesn't exist) +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md new file mode 100644 index 00000000000..28731b2c730 --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md @@ -0,0 +1,18 @@ +# DynamicConfSharedST + +**Description:** DynamicConfigurationSharedST is responsible for verifying that changing dynamic Kafka configuration will not trigger a rolling update. Shared -> for each test case we use the same Kafka resource configuration. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Run cluster operator installation | Cluster operator is installed | +| 2. | Deploy shared Kafka across all test cases | Shared Kafka is deployed | +| 3. | Deploy scraper pod | Scraper pod is deployed | + +**Labels:** + +* `dynamic-configuration` (description file doesn't exist) +* [kafka](labels/kafka.md) + +
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md new file mode 100644 index 00000000000..0442eb278b1 --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md @@ -0,0 +1,548 @@ +# ListenersST + +**Description:** This class demonstrates various tests for Kafka listeners using different authentication mechanisms. + +**Before tests execution steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Install the cluster operator with default settings | Cluster operator is installed successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + +
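For orientation, the following minimal sketch shows the client-side settings that the TLS and SCRAM-SHA listener cases below rely on, using the plain Apache Kafka consumer API. The bootstrap address, truststore path, topic, and credentials are placeholders for values that the tests obtain from the deployed cluster and its KafkaUser secrets.

```java
import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ScramShaTlsClientExample {

    public static Properties scramShaOverTlsProps() {
        Properties props = new Properties();
        // Placeholder values: bootstrap address, truststore, and credentials depend on the deployed cluster.
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9093");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/tmp/cluster-ca.truststore.p12");
        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "truststore-password");
        props.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"my-user\" password=\"my-password\";");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "listener-doc-example");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return props;
    }

    public static void main(String[] args) {
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(scramShaOverTlsProps())) {
            // Subscribe to a placeholder topic; a poll loop would follow in a real client.
            consumer.subscribe(java.util.List.of("my-topic"));
        }
    }
}
```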
+ +## testAdvertisedHostNamesAppearsInBrokerCerts + +**Description:** Verify that advertised hostnames appear correctly in broker certificates. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Define internal and external advertised hostnames and ports | Hostnames and ports are defined and listed | +| 2. | Create broker configurations with advertised hostnames and ports | Broker configurations are created | +| 3. | Deploy resources with Wait function and create Kafka instance | Resources and Kafka instance are successfully created | +| 4. | Retrieve broker certificates from Kubernetes secrets | Certificates are retrieved correctly from secrets | +| 5. | Validate that each broker's certificate contains the expected internal and external advertised hostnames | Certificates contain the correct advertised hostnames | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCertificateWithNonExistingDataCrt + +**Description:** Test checking behavior when Kafka is configured with a non-existing certificate in the TLS listener. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Define non-existing certificate name. | Non-existing certificate name is defined. | +| 2. | Create a custom secret for Kafka with the defined certificate. | Custom secret created successfully. | +| 3. | Create Kafka node pools resources. | Kafka node pools resources created. | +| 4. | Create Kafka cluster with ephemeral storage and the non-existing certificate. | Kafka cluster creation initiated. | +| 5. | Wait for controller pods to be ready if in non-KRaft mode. | Controller pods are ready. | +| 6. | Wait until Kafka status message indicates missing certificate. | Error message about missing certificate is found in Kafka status condition. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCertificateWithNonExistingDataKey + +**Description:** Test verifies that a Kafka cluster correctly identifies and reports the absence of a specified custom certificate private key. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Define the non-existing certificate key. | The non-existing certificate key string is defined. | +| 2. | Create a custom secret with a certificate for Kafka server. | Custom secret is created in the namespace. | +| 3. | Create broker and controller resources with node pools. | Resources are created and ready. | +| 4. | Deploy a Kafka cluster with a listener using the custom secret and non-existing key. | Deployment initiated without waiting for the resources to be ready. | +| 5. | If not in KRaft mode, wait for controller pods to be ready. | Controller pods are in ready state (if applicable). | +| 6. | Check Kafka status condition for custom certificate error message. | Error message indicating the missing custom certificate private key is present in Kafka status conditions. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testClusterIp + +**Description:** Test verifies the functionality of Kafka with a cluster IP listener. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create the Kafka broker and controller pools | Kafka broker and controller pools are created | +| 2. | Create the Kafka cluster with a cluster IP listener | Kafka cluster with cluster IP listener is created | +| 3. | Retrieve the cluster IP bootstrap address | Cluster IP bootstrap address is correctly retrieved | +| 4. | Deploy Kafka clients | Kafka clients are deployed successfully | +| 5. 
| Wait for Kafka clients to succeed | Kafka clients successfully produce and consume messages | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testClusterIpTls + +**Description:** This test validates the creation of Kafka resources with TLS authentication, ensuring proper setup and functionality of the Kafka cluster in a parallel namespace. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create ephemeral Kafka cluster with TLS enabled on ClusterIP listener | Kafka cluster is created with TLS enabled listener on port 9103 | +| 2. | Create Kafka user with TLS authentication | Kafka user is created successfully | +| 3. | Retrieve the ClusterIP bootstrap address for the Kafka cluster | Bootstrap address for the Kafka cluster is retrieved | +| 4. | Instantiate TLS Kafka Clients (producer and consumer) | TLS Kafka clients are instantiated successfully | +| 5. | Wait for the Kafka Clients to complete their tasks and verify success | Kafka Clients complete their tasks successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomCertLoadBalancerAndTlsRollingUpdate + +**Description:** This test verifies the behavior of Kafka with custom certificates for load balancer and TLS rolling updates. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create custom secrets for Kafka clusters | Secrets created and available in namespace | +| 2. | Deploy Kafka resources with load balancer and internal TLS listener | Kafka resources deployed with respective configurations | +| 3. | Create Kafka user and retrieve certificates | Kafka user created and certificates retrieved from Kafka status and secrets | +| 4. | Compare Kafka certificates with secret certificates | Certificates from Kafka status and secrets match | +| 5. | Verify message production and consumption using an external Kafka client | Messages successfully produced and consumed over SSL | +| 6. | Trigger and verify TLS rolling update | TLS rolling update completed successfully | +| 7. | Repeat certificate verification steps after rolling update | Certificates from Kafka status and secrets match post update | +| 8. | Repeatedly produce and consume messages to ensure Kafka stability | Messages successfully produced and consumed, ensuring stability | +| 9. | Revert the certificate updates and verify Kafka status | Certificates reverted and verified, Kafka operates normally | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomCertNodePortAndTlsRollingUpdate + +**Description:** Test verifies custom certificates with NodePort and rolling update in Kafka. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Generate root and intermediate certificates | Certificates are generated successfully | +| 2. | Generate end-entity certificates | End-entity certificates are generated successfully | +| 3. | Create custom secrets with generated certificates | Secrets are created in Kubernetes | +| 4. | Deploy Kafka cluster with custom NodePort and TLS settings | Kafka cluster is deployed and running | +| 5. | Verify messages sent and received through external Kafka client | Messages are produced and consumed successfully | +| 6. | Perform rolling update and update certificates in custom secrets | Rolling update is performed and certificates are updated | +| 7. | Verify messages sent and received after rolling update | Messages are produced and consumed successfully after update | +| 8. 
| Restore default certificate configuration and perform rolling update | Default certificates are restored and rolling update is completed | +| 9. | Verify messages sent and received with restored configuration | Messages are produced and consumed successfully with restored configuration | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomCertRouteAndTlsRollingUpdate + +**Description:** This test verifies the custom certificate handling and TLS rolling update mechanisms for Kafka brokers using OpenShift-specific configurations. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create various certificate chains and export them to PEM files | Certificates are created and exported successfully | +| 2. | Create custom secrets with the generated certificates | Secrets are created in the specified namespace | +| 3. | Deploy Kafka cluster and TLS user with specified configurations | Kafka cluster and TLS user are deployed successfully | +| 4. | Verify certificates in KafkaStatus match those in the secrets | Certificates are verified to match | +| 5. | Use external Kafka client to produce and consume messages | Messages are produced and consumed successfully | +| 6. | Update Kafka listeners with new certificates and perform rolling update | Kafka cluster rolls out successfully with updated certificates | +| 7. | Verify certificates in KafkaStatus match after update | Certificates are verified to match after the update | +| 8. | Repeat message production and consumption with updated certificates | Messages are produced and consumed successfully with new certificates | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomChainCertificatesForLoadBalancer + +**Description:** Verifies custom certificate chain configuration for Kafka load balancer, ensuring proper secret creation, resource setup, and message sending/receiving functionality. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create custom secrets for certificate chains and root CA | Secrets are created successfully | +| 2. | Deploy Kafka broker and controller pools with custom certificates | Kafka pools are deployed without issues | +| 3. | Deploy Kafka cluster with custom listener configurations | Kafka cluster is deployed with custom listener configurations | +| 4. | Set up Kafka topic and user | Kafka topic and user are created successfully | +| 5. | Verify message production and consumption via external Kafka client with TLS | Messages are produced and consumed successfully | +| 6. | Set up Kafka clients for further messaging operations | Kafka clients are set up without issues | +| 7. | Produce messages using Kafka producer | Messages are produced successfully | +| 8. | Consume messages using Kafka consumer | Messages are consumed successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomChainCertificatesForNodePort + +**Description:** Test verifies the custom chain certificates configuration for Kafka NodePort listener. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Generate custom root CA and intermediate certificates. | Root and intermediate certificates are generated. | +| 2. | Generate end entity certificates using intermediate CA. | End entity certificates are generated. | +| 3. | Export certificates to PEM files. | Certificates are exported to PEM files. | +| 4. | Create Kubernetes secrets with the custom certificates. | Custom certificate secrets are created. | +| 5. 
| Deploy Kafka cluster with NodePort listener using the custom certificates. | Kafka cluster is deployed successfully. | +| 6. | Create a Kafka user with TLS authentication. | Kafka user is created. | +| 7. | Verify message production and consumption with external Kafka client. | Messages are produced and consumed successfully. | +| 8. | Verify message production with internal Kafka client. | Messages are produced successfully. | +| 9. | Verify message consumption with internal Kafka client. | Messages are consumed successfully. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomChainCertificatesForRoute + +**Description:** Test to verify custom chain certificates for a Kafka Route. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Generate root and intermediate certificates | Root and intermediate CA keys are generated | +| 2. | Create cluster custom certificate chain and root CA secrets | Custom certificate chain and root CA secrets are created in OpenShift | +| 3. | Create Kafka cluster with custom certificates | Kafka cluster is deployed with custom certificates for internal and external listeners | +| 4. | Create Kafka user | Kafka user with TLS authentication is created | +| 5. | Verify message production and consumption with external Kafka client | Messages are produced and consumed successfully using the external Kafka client | +| 6. | Create Kafka clients for internal message production and consumption | Internal Kafka clients are created and configured with TLS authentication | +| 7. | Verify internal message production with Kafka client | Messages are produced successfully using the internal Kafka client | +| 8. | Verify internal message consumption with Kafka client | Messages are consumed successfully using the internal Kafka client | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomSoloCertificatesForLoadBalancer + +**Description:** Test verifying custom solo certificates for load balancer in a Kafka cluster. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create custom secret | Custom secret is created with the specified certificate and key | +| 2. | Create Kafka resources with node pools | Kafka brokers and controller pools are created and configured | +| 3. | Create Kafka cluster with listeners | Kafka cluster is created with internal and load balancer listeners using the custom certificates | +| 4. | Create TLS user | TLS user is created | +| 5. | Verify produced and consumed messages via external client | Messages are successfully produced and consumed using the custom certificates | +| 6. | Create and verify TLS producer client | TLS producer client is created and verified for success | +| 7. | Create and verify TLS consumer client | TLS consumer client is created and verified for success | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomSoloCertificatesForNodePort + +**Description:** Test custom certificates in Kafka listeners, specifically for the NodePort type. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Generate Root CA certificate and key | Root CA certificate and key are generated | +| 2. | Generate Intermediate CA certificate and key using Root CA | Intermediate CA certificate and key are generated | +| 3. | Generate Kafka Broker certificate and key using Intermediate CA | Broker certificate and key are generated | +| 4. | Export generated certificates and keys to PEM files | PEM files are created with certificates and keys | +| 5. 
| Create custom secret with the PEM files | Custom secret is created within the required namespace | +| 6. | Deploy and wait for Kafka cluster resources with custom certificates | Kafka cluster is deployed successfully with custom certificates | +| 7. | Create and wait for TLS KafkaUser | TLS KafkaUser is created successfully | +| 8. | Produce and consume messages using ExternalKafkaClient | Messages are successfully produced and consumed | +| 9. | Produce and consume messages using internal TLS client | Messages are successfully produced and consumed with internal TLS client | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCustomSoloCertificatesForRoute + +**Description:** Test custom solo certificates for Kafka route and client communication. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Generate root CA certificate and key | Root CA certificate and key are generated | +| 2. | Generate intermediate CA certificate and key | Intermediate CA certificate and key are generated | +| 3. | Generate end-entity certificate and key for Strimzi | End-entity certificate and key for Strimzi are generated | +| 4. | Export certificates and keys to PEM files | Certificates and keys are exported to PEM files | +| 5. | Create custom secret with certificates and keys | Custom secret is created in the namespace with certificates and keys | +| 6. | Deploy Kafka cluster with custom certificates | Kafka cluster is deployed with custom certificates | +| 7. | Create TLS Kafka user | TLS Kafka user is created | +| 8. | Verify client communication using external Kafka client | Messages are successfully produced and consumed using external Kafka client | +| 9. | Deploy Kafka clients with custom certificates | Kafka clients are deployed with custom certificates | +| 10. | Verify client communication using internal Kafka client | Messages are successfully produced and consumed using internal Kafka client | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testLoadBalancer + +**Description:** Test verifying load balancer functionality with external clients. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create instances for broker pool and controller pool using NodePoolsConverter and KafkaNodePoolTemplates | Resources are created and ready for use | +| 2. | Create Kafka cluster with ephemeral storage and load balancer listener | Kafka cluster is created with the specified configuration | +| 3. | Wait until the load balancer address is reachable | Address is reachable | +| 4. | Configure external Kafka client and send messages | Messages are sent successfully | +| 5. | Verify that messages are correctly produced and consumed | Messages are produced and consumed as expected | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testLoadBalancerTls + +**Description:** Test validating the TLS connection through a Kafka LoadBalancer. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create and configure Kafka node pools | Node pools for brokers and controllers are created | +| 2. | Create and configure Kafka cluster with TLS listener | Kafka cluster with TLS enabled LoadBalancer listener is created | +| 3. | Create and configure Kafka user with TLS authentication | Kafka user with TLS authentication is created | +| 4. | Wait for the LoadBalancer address to be reachable | LoadBalancer address becomes reachable | +| 5. 
| Send and receive messages using external Kafka client | Messages are successfully produced and consumed over the TLS connection | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testMessagesTlsScramShaWithPredefinedPassword + +**Description:** Validates that messages can be sent and received over TLS with SCRAM-SHA authentication using a predefined password, and that the password can be updated and still be functional. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create and encode the initial password | Initial password is encoded | +| 2. | Create and encode the secondary password | Secondary password is encoded | +| 3. | Create a secret in Kubernetes with the initial password | Secret is created and contains the initial password | +| 4. | Verify the password in the secret | Password in the secret is verified to be correct | +| 5. | Create a KafkaUser with SCRAM-SHA authentication using the secret | KafkaUser is created with correct authentication settings | +| 6. | Create Kafka cluster and topic with SCRAM-SHA authentication | Kafka cluster and topic are created correctly | +| 7. | Produce and consume messages using TLS and SCRAM-SHA | Messages are successfully transmitted and received | +| 8. | Update the secret with the secondary password | Secret is updated with the new password | +| 9. | Wait for the user password change to take effect | Password change is detected and applied | +| 10. | Produce and consume messages with the updated password | Messages are successfully transmitted and received with the new password | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testNodePort + +**Description:** Test checking the functionality of Kafka cluster with NodePort external listener configurations. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create resource with Kafka broker pool and controller pool | Resources with Kafka pools are created successfully | +| 2. | Create Kafka cluster with NodePort and TLS listeners | Kafka cluster is set up with the specified listeners | +| 3. | Create ExternalKafkaClient and verify message production and consumption | Messages are produced and consumed successfully | +| 4. | Check Kafka status for proper listener addresses | Listener addresses in Kafka status are validated successfully | +| 5. | Check ClusterRoleBinding annotations and labels in Kafka cluster | Annotations and labels match the expected values | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testNodePortTls + +**Description:** Test the NodePort TLS functionality for Kafka brokers in a Kubernetes environment. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create Kafka broker and controller node pools | Broker and controller node pools are created | +| 2. | Deploy Kafka cluster with NodePort listener and TLS enabled | Kafka cluster is deployed with NodePort listener and TLS | +| 3. | Create a Kafka topic | Kafka topic is created | +| 4. | Create a Kafka user with TLS authentication | Kafka user with TLS authentication is created | +| 5. | Configure external Kafka client and send and receive messages using TLS | External Kafka client sends and receives messages using TLS successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testNonExistingCustomCertificate + +**Description:** Test for verifying non-existing custom certificate handling by creating necessary resources and ensuring correct error message check. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. 
| Create necessary Kafka node pools | Kafka node pools are created and initialized | +| 2. | Create Kafka cluster with a listener using non-existing certificate | Kafka cluster resource is initialized with non-existing TLS certificate | +| 3. | Wait for pods to be ready if not in KRaft mode | Pods are ready | +| 4. | Wait for Kafka status condition message indicating the non-existing secret | Correct error message regarding the non-existing secret appears | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testOverrideNodePortConfiguration + +**Description:** Test verifying that NodePort configuration can be overridden for Kafka brokers and bootstrap service. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create Kafka broker and controller pools using resource manager. | Kafka broker and controller pools are created successfully. | +| 2. | Deploy Kafka cluster with overridden NodePort configuration for brokers and bootstrap. | Kafka cluster is deployed with specified NodePort values. | +| 3. | Verify that the bootstrap service NodePort matches the configured value. | Bootstrap NodePort matches the configured value of 32100. | +| 4. | Verify that the broker service NodePort matches the configured value. | Broker NodePort matches the configured value of 32000. | +| 5. | Produce and consume messages using an external Kafka client. | Messages are produced and consumed successfully using the external client. | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testSendMessagesCustomListenerTlsScramSha + +**Description:** Test custom listener configured with scram SHA authentication and TLS. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create a Kafka cluster with broker and controller node pools | Kafka cluster is created with node pools | +| 2. | Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication | Kafka cluster with custom listener is ready | +| 3. | Create a Kafka topic and SCRAM-SHA user | Kafka topic and user are created | +| 4. | Transmit messages over TLS using SCRAM-SHA authentication | Messages are transmitted successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testSendMessagesPlainAnonymous + +**Description:** Test sending messages over plain transport, without auth + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create Kafka resources with wait | Kafka broker, controller, and topic are created | +| 2. | Log transmission message | Transmission message is logged | +| 3. | Produce and consume messages with plain clients | Messages are successfully produced and consumed | +| 4. | Validate Kafka service discovery annotation | The discovery annotation is validated successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testSendMessagesPlainScramSha + +**Description:** Test sending messages over plain transport using scram sha auth. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create Kafka brokers and controllers | Kafka brokers and controllers are created | +| 2. | Enable Kafka with plain listener disabled and scram sha auth | Kafka instance with scram sha auth is enabled on a specified listener | +| 3. | Set up topic and user | Kafka topic and Kafka user are set up with scram sha auth credentials | +| 4. | Check logs in broker pod for authentication | Logs show that scram sha authentication succeeded | +| 5. 
| Send messages over plain transport using scram sha authentication | Messages are successfully sent over plain transport using scram sha auth | +| 6. | Verify service discovery annotation | Service discovery annotation is checked and validated | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testSendMessagesTlsAuthenticated + +**Description:** Test sending messages over tls transport using mutual tls auth. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create Kafka node pool resources | Persistent storage node pools are created | +| 2. | Disable plain listener and enable tls listener in Kafka resource | Kafka with plain listener disabled and tls listener enabled is created | +| 3. | Create Kafka topic and user | Kafka topic and tls user are created | +| 4. | Configure and deploy Kafka clients | Kafka clients producer and consumer with tls are deployed | +| 5. | Wait for clients to successfully send and receive messages | Clients successfully send and receive messages over tls | +| 6. | Assert that the service discovery contains expected info | Service discovery matches expected info | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testSendMessagesTlsScramSha + +**Description:** Test sending messages over TLS transport using SCRAM-SHA authentication. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Create resources for Kafka node pools | Kafka node pools are created | +| 2. | Create Kafka cluster with SCRAM-SHA-512 authentication | Kafka cluster is created with SCRAM-SHA authentication | +| 3. | Create Kafka topic and user | Kafka topic and user are created | +| 4. | Transmit messages over TLS using SCRAM-SHA | Messages are successfully transmitted | +| 5. | Check if generated password has the expected length | Password length is as expected | +| 6. | Verify Kafka service discovery annotation | Service discovery annotation is as expected | + +**Labels:** + +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md new file mode 100644 index 00000000000..9423eef096c --- /dev/null +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md @@ -0,0 +1,123 @@ +# MultipleListenersST + +**Description:** Test to verify the functionality of using multiple NodePorts in a Kafka cluster within the same namespace. + +**Labels:** + +* [kafka](labels/kafka.md) + +
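The listener combinations referenced in the cases below are assembled in the system tests with the Strimzi `GenericKafkaListenerBuilder` API. The sketch below is a simplified illustration of combining an internal TLS listener with a NodePort listener; the listener names and ports are arbitrary, and the package names assume the current `io.strimzi.api.kafka.model.kafka.listener` layout, which may differ between Strimzi versions.

```java
import java.util.List;

import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationTls;
import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;

public class MultipleListenersExample {

    // Each listener only needs a unique name and port within the Kafka custom resource.
    public static List<GenericKafkaListener> internalAndNodePortListeners() {
        GenericKafkaListener internalTls = new GenericKafkaListenerBuilder()
            .withName("tls")
            .withPort(9093)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(true)
            .withAuth(new KafkaListenerAuthenticationTls())
            .build();

        GenericKafkaListener nodePort = new GenericKafkaListenerBuilder()
            .withName("external")
            .withPort(9094)
            .withType(KafkaListenerType.NODEPORT)
            .withTls(true)
            .build();

        return List.of(internalTls, nodePort);
    }
}
```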
+ +## testCombinationOfEveryKindOfListener + +**Description:** Verifies the combination of every kind of Kafka listener: INTERNAL, NODEPORT, ROUTE, and LOADBALANCER. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Retrieve different types of Kafka listeners | Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved | +| 2. | Combine all different listener lists | A combined list of all Kafka listener types is created | +| 3. | Run listeners test with combined listener list | Listeners test runs with all types of Kafka listeners in the combined list | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testCombinationOfInternalAndExternalListeners + +**Description:** Test verifying the combination of internal and external Kafka listeners. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Check if the environment supports cluster-wide NodePort rights | Test is skipped if the environment is not suitable | +| 2. | Retrieve and combine internal and NodePort listeners | Listeners are successfully retrieved and combined | +| 3. | Run listeners test with combined listeners | Listeners test is executed successfully | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testMixtureOfExternalListeners + +**Description:** Test ensuring that different types of external Kafka listeners (ROUTE and NODEPORT) work correctly when mixed. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Retrieve route listeners | Route listeners are retrieved from test cases | +| 2. | Retrieve nodeport listeners | Nodeport listeners are retrieved from test cases | +| 3. | Combine route and nodeport listeners | Multiple different listeners list is populated | +| 4. | Run listeners test | Listeners test runs using the combined list | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testMultipleInternal + +**Description:** Test to verify the usage of more than one Kafka cluster within a single namespace. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Run the internal Kafka listeners test | Listeners test runs successfully on the specified cluster | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testMultipleLoadBalancers + +**Description:** Test verifying the behavior of multiple load balancers in a single namespace using more than one Kafka cluster. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Run listeners test with LOADBALANCER type | Listeners test executes successfully with load balancers | +| 2. | Validate the results | Results match the expected outcomes for multiple load balancers | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testMultipleNodePorts + +**Description:** Test verifying the functionality of using multiple NodePorts in a Kafka cluster within the same namespace. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Execute listener tests with NodePort configuration | Listener tests run without issues using NodePort | + +**Labels:** + +* [kafka](labels/kafka.md) + + +## testMultipleRoutes + +**Description:** Test to verify the functionality of multiple Kafka route listeners in a single namespace. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Retrieve test cases for Kafka Listener Type ROUTE | Test cases for ROUTE are retrieved | +| 2. 
| Run listener tests using the retrieved test cases and cluster name | Listener tests run successfully with no errors | + +**Labels:** + +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/labels/kafka.md b/development-docs/systemtests/labels/kafka.md index 30151222386..28c2eebf013 100644 --- a/development-docs/systemtests/labels/kafka.md +++ b/development-docs/systemtests/labels/kafka.md @@ -4,4 +4,63 @@ These tests validate the core Apache Kafka functionality within the Strimzi ecosystem, ensuring the reliability, scalability, and correctness of Kafka clusters. They cover various aspects such as dynamic configuration updates, listener configurations, node pool management, version upgrades, quotas, and tiered storage. -These tests are crucial to ensure that Kafka clusters can handle production workloads. \ No newline at end of file +These tests are crucial to ensure that Kafka clusters can handle production workloads. + + +**Tests:** +- [testReadOnlyRootFileSystem](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testLabelsExistenceAndManipulation](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testSendMessagesCustomListenerTlsScramSha](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testKafkaJBODDeleteClaimsTrueFalse](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testCertificateWithNonExistingDataKey](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testResizeJbodVolumes](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testCertificateWithNonExistingDataCrt](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testCombinationOfEveryKindOfListener](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testSendMessagesTlsScramSha](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testMixtureOfExternalListeners](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testMessagesTlsScramShaWithPredefinedPassword](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testKafkaManagementTransferToAndFromKafkaNodePool](../io.strimzi.systemtest.kafka.KafkaNodePoolST.md) +- [testCustomSoloCertificatesForLoadBalancer](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testCustomChainCertificatesForRoute](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testCombinationOfInternalAndExternalListeners](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testClusterIp](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testCustomCertRouteAndTlsRollingUpdate](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testRegenerateCertExternalAddressChange](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testJvmAndResources](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testMultipleLoadBalancers](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testUpdateToExternalListenerCausesRollingRestart](../io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md) +- [testKafkaWithVersion](../io.strimzi.systemtest.kafka.KafkaVersionsST.md) +- [testClusterIpTls](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testMessagesAndConsumerOffsetFilesOnDisk](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testDeployUnsupportedKafka](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testRemoveComponentsFromEntityOperator](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testCustomChainCertificatesForLoadBalancer](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- 
[testCustomChainCertificatesForNodePort](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testMultipleRoutes](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testSendMessagesPlainAnonymous](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testOverrideNodePortConfiguration](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testTieredStorageWithAivenPlugin](../io.strimzi.systemtest.kafka.TieredStorageST.md) +- [testLoadBalancerTls](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testNodePoolsRolesChanging](../io.strimzi.systemtest.kafka.KafkaNodePoolST.md) +- [testNodePort](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testSendMessagesPlainScramSha](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testCustomSoloCertificatesForRoute](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testUpdateToExternalListenerCausesRollingRestartUsingExternalClients](../io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md) +- [testNodePortTls](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testNonExistingCustomCertificate](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testSendMessagesTlsAuthenticated](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testAdvertisedHostNamesAppearsInBrokerCerts](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testNodePoolsAdditionAndRemoval](../io.strimzi.systemtest.kafka.KafkaNodePoolST.md) +- [testAdditionalVolumes](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testMultipleNodePorts](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testCustomCertNodePortAndTlsRollingUpdate](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testKafkaQuotasPluginWithBandwidthLimitation](../io.strimzi.systemtest.kafka.QuotasST.md) +- [testKRaftMode](../io.strimzi.systemtest.kafka.KafkaST.md) +- [testKafkaNodePoolBrokerIdsManagementUsingAnnotations](../io.strimzi.systemtest.kafka.KafkaNodePoolST.md) +- [testLoadBalancer](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testKafkaQuotasPluginIntegration](../io.strimzi.systemtest.kafka.QuotasST.md) +- [testCustomCertLoadBalancerAndTlsRollingUpdate](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testSimpleDynamicConfiguration](../io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md) +- [testMultipleInternal](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) +- [testCustomSoloCertificatesForNodePort](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testConnectWithConnectorUsingConfigAndEnvProvider](../io.strimzi.systemtest.kafka.ConfigProviderST.md) From 48357bfb1dfc628d8bb6dbdd7a45cf5c82ecd35c Mon Sep 17 00:00:00 2001 From: see-quick Date: Tue, 1 Oct 2024 11:56:41 +0200 Subject: [PATCH 04/12] review from Paul Signed-off-by: see-quick --- ...imzi.systemtest.bridge.HttpBridgeCorsST.md | 30 ++++----- ....strimzi.systemtest.bridge.HttpBridgeST.md | 58 ++++++++-------- ....systemtest.bridge.HttpBridgeScramShaST.md | 36 +++++----- ...rimzi.systemtest.bridge.HttpBridgeTlsST.md | 34 +++++----- ...mzi.systemtest.connect.ConnectBuilderST.md | 16 ++--- ...io.strimzi.systemtest.connect.ConnectST.md | 42 ++++++------ ...rimzi.systemtest.kafka.ConfigProviderST.md | 22 +++---- ...trimzi.systemtest.kafka.KafkaNodePoolST.md | 36 +++++----- .../io.strimzi.systemtest.kafka.KafkaST.md | 56 ++++++++-------- ...trimzi.systemtest.kafka.KafkaVersionsST.md | 16 ++--- 
.../io.strimzi.systemtest.kafka.QuotasST.md | 28 ++++---- ...trimzi.systemtest.kafka.TieredStorageST.md | 22 +++---- ...afka.dynamicconfiguration.DynamicConfST.md | 34 +++++----- ...ynamicconfiguration.DynamicConfSharedST.md | 6 +- ....systemtest.kafka.listeners.ListenersST.md | 66 +++++++++---------- ...est.kafka.listeners.MultipleListenersST.md | 32 ++++----- .../systemtest/bridge/HttpBridgeCorsST.java | 30 ++++----- .../systemtest/bridge/HttpBridgeST.java | 58 ++++++++-------- .../bridge/HttpBridgeScramShaST.java | 36 +++++----- .../systemtest/bridge/HttpBridgeTlsST.java | 34 +++++----- .../systemtest/connect/ConnectBuilderST.java | 16 ++--- .../strimzi/systemtest/connect/ConnectST.java | 42 ++++++------ .../systemtest/kafka/ConfigProviderST.java | 22 +++---- .../systemtest/kafka/KafkaNodePoolST.java | 40 +++++------ .../io/strimzi/systemtest/kafka/KafkaST.java | 64 +++++++++--------- .../systemtest/kafka/KafkaVersionsST.java | 16 ++--- .../io/strimzi/systemtest/kafka/QuotasST.java | 28 ++++---- .../systemtest/kafka/TieredStorageST.java | 22 +++---- .../dynamicconfiguration/DynamicConfST.java | 42 ++++++------ .../DynamicConfSharedST.java | 6 +- .../kafka/listeners/ListenersST.java | 66 +++++++++---------- .../kafka/listeners/MultipleListenersST.java | 32 ++++----- 32 files changed, 544 insertions(+), 544 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeCorsST.md b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeCorsST.md index dd270119306..11e3ac85069 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeCorsST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeCorsST.md @@ -6,8 +6,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Set up Kafka Bridge and its configuration including CORS settings | Kafka Bridge is set up with the correct configuration | -| 2. | Deploy required Kafka resources and scraper pod | Kafka resources and scraper pod are deployed and running | +| 1. | Set up Kafka Bridge and its configuration including CORS settings. | Kafka Bridge is set up with the correct configuration. | +| 2. | Deploy required Kafka resources and scraper pod. | Kafka resources and scraper pod are deployed and running. | **Labels:** @@ -23,13 +23,13 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka Bridge user and consumer group | Kafka Bridge user and consumer group are created successfully | -| 2. | Set up headers with forbidden origin and pre-flight HTTP OPTIONS method | Headers and method are set correctly | -| 3. | Send HTTP OPTIONS request to the Bridge | HTTP OPTIONS request is sent to the Bridge and a response is received | -| 4. | Verify the response contains '403' and 'CORS Rejected - Invalid origin' | Response indicates the CORS request is rejected | -| 5. | Remove 'Access-Control-Request-Method' from headers and set HTTP POST method | Headers are updated and HTTP method is set correctly | -| 6. | Send HTTP POST request to the Bridge | HTTP POST request is sent to the Bridge and a response is received | -| 7. | Verify the response contains '403' and 'CORS Rejected - Invalid origin' | Response indicates the CORS request is rejected | +| 1. | Create Kafka Bridge user and consumer group. | Kafka Bridge user and consumer group are created successfully. | +| 2. | Set up headers with forbidden origin and pre-flight HTTP OPTIONS method. | Headers and method are set correctly. | +| 3. | Send HTTP OPTIONS request to the Bridge. 
| HTTP OPTIONS request is sent to the Bridge and a response is received. | +| 4. | Verify the response contains '403' and 'CORS Rejected - Invalid origin'. | Response indicates the CORS request is rejected. | +| 5. | Remove 'Access-Control-Request-Method' from headers and set HTTP POST method. | Headers are updated and HTTP method is set correctly. | +| 6. | Send HTTP POST request to the Bridge. | HTTP POST request is sent to the Bridge and a response is received. | +| 7. | Verify the response contains '403' and 'CORS Rejected - Invalid origin'. | Response indicates the CORS request is rejected. | **Labels:** @@ -44,12 +44,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Set up the Kafka Bridge user and configuration | Kafka Bridge user and configuration are set up | -| 2. | Construct the request URL and headers | URL and headers are constructed properly | -| 3. | Send OPTIONS request to Kafka Bridge and capture the response | Response is captured from Bridge | -| 4. | Validate the response contains expected status codes and headers | Response has correct status codes and headers for allowed origin | -| 5. | Send GET request to Kafka Bridge and capture the response | Response is captured from Bridge for GET request | -| 6. | Check if the GET request response is '404 Not Found' | Response for GET request is 404 Not Found | +| 1. | Set up the Kafka Bridge user and configuration. | Kafka Bridge user and configuration are set up. | +| 2. | Construct the request URL and headers. | URL and headers are constructed properly. | +| 3. | Send OPTIONS request to Kafka Bridge and capture the response. | Response is captured from Bridge. | +| 4. | Validate the response contains expected status codes and headers. | Response has correct status codes and headers for allowed origin. | +| 5. | Send GET request to Kafka Bridge and capture the response. | Response is captured from Bridge for GET request. | +| 6. | Check if the GET request response is '404 Not Found'. | Response for GET request is 404 Not Found. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeST.md b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeST.md index 3245fb46e6d..2faf9ea70dd 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize Test Storage and deploy Kafka and Kafka Bridge | Kafka and Kafka Bridge are deployed with necessary configuration | +| 1. | Initialize Test Storage and deploy Kafka and Kafka Bridge. | Kafka and Kafka Bridge are deployed with necessary configuration. | **Labels:** @@ -42,12 +42,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Create a Kafka Bridge resource with initial configuration | Kafka Bridge is created and deployed with the specified initial configuration | -| 2. | Remove an environment variable that is in use | Environment variable TEST_ENV_1 is removed from the initial configuration | -| 3. | Verify initial probe values and environment variables | The probe values and environment variables match the initial configuration | -| 4. | Update Kafka Bridge resource with new configuration | Kafka Bridge is updated and redeployed with the new configuration | -| 5. | Verify updated probe values and environment variables | The probe values and environment variables match the updated configuration | -| 6. 
| Verify Kafka Bridge configurations for producer and consumer | Producer and consumer configurations match the updated settings | +| 1. | Create a Kafka Bridge resource with initial configuration. | Kafka Bridge is created and deployed with the specified initial configuration. | +| 2. | Remove an environment variable that is in use. | Environment variable TEST_ENV_1 is removed from the initial configuration. | +| 3. | Verify initial probe values and environment variables. | The probe values and environment variables match the initial configuration. | +| 4. | Update Kafka Bridge resource with new configuration. | Kafka Bridge is updated and redeployed with the new configuration. | +| 5. | Verify updated probe values and environment variables. | The probe values and environment variables match the updated configuration. | +| 6. | Verify Kafka Bridge configurations for producer and consumer. | Producer and consumer configurations match the updated settings. | **Labels:** @@ -81,10 +81,10 @@ | Step | Action | Result | | - | - | - | -| 1. | Retrieve the Kafka Bridge service using kubeClient | Kafka Bridge service instance is obtained | -| 2. | Extract the discovery annotation from the service metadata | The discovery annotation is retrieved as a string | -| 3. | Convert the discovery annotation to a JsonArray | JsonArray representation of the discovery annotation is created | -| 4. | Validate the content of the JsonArray against expected values | The JsonArray matches the expected service discovery information | +| 1. | Retrieve the Kafka Bridge service using kubeClient. | Kafka Bridge service instance is obtained. | +| 2. | Extract the discovery annotation from the service metadata. | The discovery annotation is retrieved as a string. | +| 3. | Convert the discovery annotation to a JsonArray. | JsonArray representation of the discovery annotation is created. | +| 4. | Validate the content of the JsonArray against expected values. | The JsonArray matches the expected service discovery information. | **Labels:** @@ -100,11 +100,11 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize the test storage | TestStorage instance is initialized | -| 2. | Create Kafka topic resource | Kafka topic resource is created with specified configurations | -| 3. | Setup and deploy Kafka Bridge consumer client | Kafka Bridge consumer client is set up and started receiving messages | -| 4. | Send messages using Kafka producer | Messages are sent to Kafka successfully | -| 5. | Verify message reception | All messages are received by Kafka Bridge consumer client | +| 1. | Initialize the test storage. | TestStorage instance is initialized. | +| 2. | Create Kafka topic resource. | Kafka topic resource is created with specified configurations. | +| 3. | Setup and deploy Kafka Bridge consumer client. | Kafka Bridge consumer client is set up and started receiving messages. | +| 4. | Send messages using Kafka producer. | Messages are sent to Kafka successfully. | +| 5. | Verify message reception. | All messages are received by Kafka Bridge consumer client. | **Labels:** @@ -137,10 +137,10 @@ | Step | Action | Result | | - | - | - | -| 1. | Create a KafkaBridge resource and wait for it to be ready | KafkaBridge resource is created and ready with 1 replica | -| 2. | Fetch the current number of KafkaBridge pods | There should be exactly 1 KafkaBridge pod initially | -| 3. | Scale KafkaBridge to zero replicas | Scaling action is acknowledged | -| 4. 
| Wait for KafkaBridge to scale down to zero replicas | KafkaBridge scales down to zero replicas correctly | +| 1. | Create a KafkaBridge resource and wait for it to be ready. | KafkaBridge resource is created and ready with 1 replica. | +| 2. | Fetch the current number of KafkaBridge pods. | There should be exactly 1 KafkaBridge pod initially. | +| 3. | Scale KafkaBridge to zero replicas. | Scaling action is acknowledged. | +| 4. | Wait for KafkaBridge to scale down to zero replicas. | KafkaBridge scales down to zero replicas correctly. | | 5. | Check the number of KafkaBridge pods after scaling | No KafkaBridge pods should be running | | 6. | Verify the status of KafkaBridge | KafkaBridge status should indicate it is ready with zero replicas | @@ -157,15 +157,15 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize test storage | Test storage is initialized with necessary context | -| 2. | Create a Kafka Bridge client job | Kafka Bridge client job is configured and instantiated | -| 3. | Create Kafka topic | Kafka topic is successfully created | -| 4. | Start Kafka Bridge producer | Kafka Bridge producer successfully begins sending messages | -| 5. | Wait for producer success | All messages are sent successfully | -| 6. | Start Kafka consumer | Kafka consumer is instantiated and starts consuming messages | -| 7. | Wait for consumer success | All messages are consumed successfully | -| 8. | Verify Kafka Bridge pod labels | Labels for Kafka Bridge pods are correctly set and verified | -| 9. | Verify Kafka Bridge service labels | Labels for Kafka Bridge service are correctly set and verified | +| 1. | Initialize test storage. | Test storage is initialized with necessary context. | +| 2. | Create a Kafka Bridge client job. | Kafka Bridge client job is configured and instantiated. | +| 3. | Create Kafka topic. | Kafka topic is successfully created. | +| 4. | Start Kafka Bridge producer. | Kafka Bridge producer successfully begins sending messages. | +| 5. | Wait for producer success. | All messages are sent successfully. | +| 6. | Start Kafka consumer. | Kafka consumer is instantiated and starts consuming messages. | +| 7. | Wait for consumer success. | All messages are consumed successfully. | +| 8. | Verify Kafka Bridge pod labels. | Labels for Kafka Bridge pods are correctly set and verified. | +| 9. | Verify Kafka Bridge service labels. | Labels for Kafka Bridge service are correctly set and verified. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeScramShaST.md b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeScramShaST.md index 0d11a5ee668..033a961598a 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeScramShaST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeScramShaST.md @@ -6,12 +6,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Create TestStorage instance | TestStorage instance is created | -| 2. | Create BridgeClients instance | BridgeClients instance is created | -| 3. | Deploy Kafka and KafkaBridge | Kafka and KafkaBridge are deployed successfully | -| 4. | Create Kafka topic | Kafka topic is created with the given configuration | -| 5. | Create Kafka user with SCRAM-SHA authentication | Kafka user is created and configured with SCRAM-SHA authentication | -| 6. | Deploy HTTP bridge | HTTP bridge is deployed | +| 1. | Create TestStorage instance. | TestStorage instance is created. | +| 2. | Create BridgeClients instance. 
| BridgeClients instance is created. | +| 3. | Deploy Kafka and KafkaBridge. | Kafka and KafkaBridge are deployed successfully. | +| 4. | Create Kafka topic. | Kafka topic is created with the given configuration. | +| 5. | Create Kafka user with SCRAM-SHA authentication. | Kafka user is created and configured with SCRAM-SHA authentication. | +| 6. | Deploy HTTP bridge. | HTTP bridge is deployed. | **Labels:** @@ -27,11 +27,11 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize TestStorage and BridgeClientsBuilder instances | Instances are successfully initialized | -| 2. | Create Kafka topic using ResourceManager | Kafka topic is created and available | -| 3. | Create Bridge consumer using ResourceManager | Bridge consumer is successfully created | -| 4. | Send messages to Kafka using KafkaClients | Messages are successfully sent to the Kafka topic | -| 5. | Wait for clients' success validation | Messages are successfully consumed from the Kafka topic | +| 1. | Initialize TestStorage and BridgeClientsBuilder instances. | Instances are successfully initialized. | +| 2. | Create Kafka topic using ResourceManager. | Kafka topic is created and available. | +| 3. | Create Bridge consumer using ResourceManager. | Bridge consumer is successfully created. | +| 4. | Send messages to Kafka using KafkaClients. | Messages are successfully sent to the Kafka topic. | +| 5. | Wait for clients' success validation. | Messages are successfully consumed from the Kafka topic. | **Labels:** @@ -46,13 +46,13 @@ | Step | Action | Result | | - | - | - | -| 1. | Create TestStorage and BridgeClients objects | Instances of TestStorage and BridgeClients are created | -| 2. | Create topic using the resource manager | Topic is created successfully with the specified configuration | -| 3. | Start producing messages via Kafka Bridge | Messages are produced successfully to the topic | -| 4. | Wait for producer success | Producer finishes sending messages without errors | -| 5. | Create KafkaClients and configure with TLS and SCRAM-SHA | Kafka client is configured with appropriate security settings | -| 6. | Start consuming messages via Kafka client | Messages are consumed successfully from the topic | -| 7. | Wait for consumer success | Consumer finishes receiving messages without errors | +| 1. | Create TestStorage and BridgeClients objects. | Instances of TestStorage and BridgeClients are created. | +| 2. | Create topic using the resource manager. | Topic is created successfully with the specified configuration. | +| 3. | Start producing messages via Kafka Bridge. | Messages are produced successfully to the topic. | +| 4. | Wait for producer success. | Producer finishes sending messages without errors. | +| 5. | Create KafkaClients and configure with TLS and SCRAM-SHA. | Kafka client is configured with appropriate security settings. | +| 6. | Start consuming messages via Kafka client. | Messages are consumed successfully from the topic. | +| 7. | Wait for consumer success. | Consumer finishes receiving messages without errors. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeTlsST.md b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeTlsST.md index 7b6d1e955c0..6acc7d38e95 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeTlsST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.bridge.HttpBridgeTlsST.md @@ -6,10 +6,10 @@ | Step | Action | Result | | - | - | - | -| 1. 
| Initialize test storage and context | Test storage and context are initialized successfully | -| 2. | Deploy Kafka and KafkaBridge | Kafka and KafkaBridge are deployed and running | -| 3. | Create Kafka user with TLS configuration | Kafka user with TLS configuration is created | -| 4. | Deploy HTTP bridge with TLS configuration | HTTP bridge is deployed with TLS configuration | +| 1. | Initialize test storage and context. | Test storage and context are initialized successfully. | +| 2. | Deploy Kafka and KafkaBridge. | Kafka and KafkaBridge are deployed and running. | +| 3. | Create Kafka user with TLS configuration. | Kafka user with TLS configuration is created. | +| 4. | Deploy HTTP bridge with TLS configuration. | HTTP bridge is deployed with TLS configuration. | **Labels:** @@ -25,13 +25,13 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize the test storage instance | TestStorage object is instantiated with the test context. | -| 2. | Configure Kafka Bridge client for consumption | Kafka Bridge client is configured with topic and consumer names. | -| 3. | Create Kafka topic with provided configurations | Kafka topic resource is created and available. | -| 4. | Deploy the Kafka Bridge consumer | Kafka Bridge consumer starts successfully and is ready to consume messages. | -| 5. | Initialize TLS Kafka client for message production | TLS Kafka client is configured and initialized. | -| 6. | Deploy the Kafka producer TLS client | TLS Kafka producer client starts successfully and begins sending messages. | -| 7. | Verify message consumption | Messages are successfully consumed by the Kafka Bridge consumer. | +| 1. | Initialize the test storage instance. | TestStorage object is instantiated with the test context. | +| 2. | Configure Kafka Bridge client for consumption. | Kafka Bridge client is configured with topic and consumer names. | +| 3. | Create Kafka topic with provided configurations. | Kafka topic resource is created and available. | +| 4. | Deploy the Kafka Bridge consumer. | Kafka Bridge consumer starts successfully and is ready to consume messages. | +| 5. | Initialize TLS Kafka client for message production. | TLS Kafka client is configured and initialized. | +| 6. | Deploy the Kafka producer TLS client. | TLS Kafka producer client starts successfully and begins sending messages. | +| 7. | Verify message consumption. | Messages are successfully consumed by the Kafka Bridge consumer. | **Labels:** @@ -46,12 +46,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize TestStorage and BridgeClients with TLS configuration | TestStorage and BridgeClients are initialized with TLS configuration | -| 2. | Create Kafka topic using resource manager | Kafka topic is successfully created | -| 3. | Create Kafka Bridge Client job for producing messages | Kafka Bridge Client job is created and produces messages successfully | -| 4. | Verify that the producer successfully sends messages | Producer successfully sends the expected number of messages | -| 5. | Create Kafka client consumer with TLS configuration | Kafka client consumer is created with TLS configuration | -| 6. | Verify that the consumer successfully receives messages | Consumer successfully receives the expected number of messages | +| 1. | Initialize TestStorage and BridgeClients with TLS configuration. | TestStorage and BridgeClients are initialized with TLS configuration. | +| 2. | Create Kafka topic using resource manager. | Kafka topic is successfully created. | +| 3. 
| Create Kafka Bridge Client job for producing messages. | Kafka Bridge Client job is created and produces messages successfully. |
+| 4. | Verify that the producer successfully sends messages. | Producer successfully sends the expected number of messages. |
+| 5. | Create Kafka client consumer with TLS configuration. | Kafka client consumer is created with TLS configuration. |
+| 6. | Verify that the consumer successfully receives messages. | Consumer successfully receives the expected number of messages. |

**Labels:**

diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md
index 0e55406207b..81e35ad5b05 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectBuilderST.md
@@ -16,14 +16,14 @@

| Step | Action | Result |
| - | - | - |
-| 1. | Initialize TestStorage and get test image name | TestStorage instance is created and the image name for the test case is retrieved |
-| 2. | Create a Plugin with wrong checksum and build Kafka Connect resource with it | Kafka Connect resource is created but the build fails due to wrong checksum |
-| 3. | Deploy Scraper pod with specific configurations | Kafka Scraper pod are successfully deployed |
-| 4. | Wait for Kafka Connect status to indicate build failure | Kafka Connect status contains message about build failure |
-| 5. | Deploy network policies for Kafka Connect | Network policies are successfully deployed for Kafka Connect |
-| 6. | Replace the plugin checksum with the correct one and update Kafka Connect resource | Kafka Connect resource is updated with the correct checksum |
-| 7. | Wait for Kafka Connect to be ready | Kafka Connect becomes ready |
-| 8. | Verify that EchoSink KafkaConnector is available in Kafka Connect API | EchoSink KafkaConnector is returned by Kafka Connect API |
+| 1. | Initialize TestStorage and get test image name. | TestStorage instance is created and the image name for the test case is retrieved. |
+| 2. | Create a Plugin with wrong checksum and build Kafka Connect resource with it. | Kafka Connect resource is created but the build fails due to wrong checksum. |
+| 3. | Deploy Scraper pod with specific configurations. | Kafka Scraper pod is successfully deployed. |
+| 4. | Wait for Kafka Connect status to indicate build failure. | Kafka Connect status contains message about build failure. |
+| 5. | Deploy network policies for Kafka Connect. | Network policies are successfully deployed for Kafka Connect. |
+| 6. | Replace the plugin checksum with the correct one and update Kafka Connect resource. | Kafka Connect resource is updated with the correct checksum. |
+| 7. | Wait for Kafka Connect to be ready. | Kafka Connect becomes ready. |
+| 8. | Verify that EchoSink KafkaConnector is available in Kafka Connect API. | EchoSink KafkaConnector is returned by Kafka Connect API. |
| 9.
| Verify that EchoSink KafkaConnector is listed in Kafka Connect resource status | EchoSink KafkaConnector is listed in the status of Kafka Connect resource | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md index c4794cbf929..474e5da864f 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy scraper Pod for accessing all other Pods | Scraper Pod is deployed | +| 1. | Deploy scraper Pod for accessing all other Pods. | Scraper Pod is deployed. | **Labels:** @@ -22,8 +22,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Create resource with Node Pools | Node Pools created successfully | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 1. | Create resource with node pools | Node Pools created successfully | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Deploy Kafka cluster with SCRAM-SHA-512 authentication | Kafka cluster deployed with specified authentications | | 4. | Create Kafka Topic | Topic created successfully | | 5. | Create Kafka SCRAM-SHA-512 user with a weird username | User created successfully with SCRAM-SHA-512 credentials | @@ -46,7 +46,7 @@ | Step | Action | Result | | - | - | - | | 1. | Set up a name of username containing dots and 64 characters | | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Create Kafka broker, controller, topic, and Kafka user with the specified username | Resources are created with the expected configurations | | 4. | Setup Kafka Connect with the created Kafka instance and TLS authentication | Kafka Connect is set up with the expected configurations | | 5. | Check if the user can produce messages to Kafka | Messages are produced successfully | @@ -91,7 +91,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create and configure Kafka Connect with initial values | Kafka Connect is created and configured with initial environment variables and readiness/liveness probes | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Verify initial configuration and environment variables | Initial configuration and environment variables are as expected | | 4. | Update Kafka Connect configuration and environment variables | Kafka Connect configuration and environment variables are updated | | 5. | Verify updated configuration and environment variables | Updated configuration and environment variables are as expected | @@ -109,13 +109,13 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize Test Storage | Test storage instance is created with required context | -| 2. 
| Define expected configurations | Configurations are loaded from properties file | -| 3. | Create and wait for resources | Kafka resources, including NodePools and KafkaConnect instances, are created and become ready | -| 4. | Annotate for manual rolling update | KafkaConnect components are annotated for a manual rolling update | -| 5. | Perform and wait for rolling update | KafkaConnect components roll and new pods are deployed | -| 6. | Kafka Connect pod | Pod configurations and annotations are verified | -| 7. | Kafka Connectors | Various Kafka Connect resource labels and configurations are verified to ensure correct deployment | +| 1. | Initialize Test Storage. | Test storage instance is created with required context. | +| 2. | Define expected configurations. | Configurations are loaded from properties file. | +| 3. | Create and wait for resources. | Kafka resources, including node pools and KafkaConnect instances, are created and become ready. | +| 4. | Annotate for manual rolling update. | KafkaConnect components are annotated for a manual rolling update. | +| 5. | Perform and wait for rolling update. | KafkaConnect components roll and new pods are deployed. | +| 6. | Kafka Connect pod. | Pod configurations and annotations are verified. | +| 7. | Kafka Connectors. | Various Kafka Connect resource labels and configurations are verified to ensure correct deployment. | **Labels:** @@ -131,7 +131,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create TestStorage instance | TestStorage instance is created | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Create broker and controller node pools | Node pools are created and ready | | 4. | Create Kafka cluster | Kafka cluster is created and operational | | 5. | Setup JVM options and resource requirements for Kafka Connect | Kafka Connect is configured with specified JVM options and resources | @@ -151,7 +151,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create and configure test storage | Test storage is set up with necessary configurations. | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Create and wait for the broker and controller pools | Broker and controller pools are created and running. | | 4. | Deploy and configure Kafka Connect with File Sink Plugin | Kafka Connect with File Sink Plugin is deployed and configured. | | 5. | Deploy Network Policies for Kafka Connect | Network Policies are successfully deployed for Kafka Connect. | @@ -192,7 +192,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create TestStorage object instance | Instance of TestStorage is created | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Create resources for KafkaNodePools and KafkaCluster | Resources are created and ready | | 4. 
| Deploy Kafka Connect with file plugin | Kafka Connect is deployed with 1 initial replica | | 5. | Verify the initial replica count | Initial replica count is verified to be 1 | @@ -214,8 +214,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Create object instance of TestStorage | Instance of TestStorage is created | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 1. | Create object instance of TestStorage. | Instance of TestStorage is created. | +| 2. | Create node pools using resourceManager based on the configuration. | node pools for broker and controller are created or not based on configuration. | | 3. | Deploy Kafka with SCRAM-SHA-512 listener | Kafka is deployed with the specified listener authentication | | 4. | Create KafkaUser with SCRAM-SHA authentication | KafkaUser is created using SCRAM-SHA authentication with the given credentials | | 5. | Create KafkaTopic | KafkaTopic is created | @@ -238,7 +238,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 1. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 2. | Create Kafka cluster with SCRAM-SHA authentication | Kafka cluster is created with SCRAM-SHA authentication enabled | | 3. | Create a Kafka user with SCRAM-SHA authentication | Kafka user with SCRAM-SHA authentication is created | | 4. | Deploy Kafka Connect with the created user credentials | Kafka Connect is deployed successfully | @@ -260,7 +260,7 @@ | - | - | - | | 1. | Create Secrets and ConfigMaps | Secrets and ConfigMaps are created successfully. | | 2. | Create Kafka environment | Kafka broker, Kafka Connect, and other resources are deployed successfully. | -| 3. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 3. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 4. | Bind Secrets and ConfigMaps to Kafka Connect | Secrets and ConfigMaps are bound to Kafka Connect as volumes and environment variables. | | 5. | Verify environment variables | Kafka Connect environment variables contain expected values from Secrets and ConfigMaps. | | 6. | Verify mounted volumes | Kafka Connect mounted volumes contain expected values from Secrets and ConfigMaps. | @@ -301,7 +301,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize the test storage and create broker and controller pools | Broker and controller pools are created successfully | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Deploy Kafka, Kafka Connect and Kafka Connector resources | Kafka, Kafka Connect and Kafka Connector resources are deployed successfully | | 4. | Scale Kafka Connect subresource | Kafka Connect subresource is scaled successfully | | 5. 
| Verify Kafka Connect subresource scaling | Kafka Connect replicas and observed generation are as expected | @@ -394,7 +394,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create test storage instance | Test storage instance is created | -| 2. | Create NodePools using resourceManager based on the configuration | NodePools for broker and controller are created or not based on configuration | +| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | | 3. | Create resources for Kafka broker and Kafka Connect components | Resources are created and ready | | 4. | Configure Kafka broker with TLS listener and client authentication | Kafka broker is configured correctly | | 5. | Deploy Kafka user with TLS authentication | Kafka user is deployed with TLS authentication | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md index c8e2323a5ea..88c173c237c 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md @@ -1,12 +1,12 @@ # ConfigProviderST -**Description:** Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration. +**Description:** This test verifies Kafka Connect using ConfigMap and EnvVar configuration. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy cluster operator across all namespaces, with custom configuration | Cluster operator is deployed | +| 1. | Deploy cluster operator across all namespaces, with custom configuration. | Cluster operator is deployed. | **Labels:** @@ -16,20 +16,20 @@ ## testConnectWithConnectorUsingConfigAndEnvProvider -**Description:** Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration. +**Description:** Tests to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Create broker and controller pools | Resources are created and are in ready state | -| 2. | Create Kafka cluster | Kafka cluster is ready with 3 brokers | -| 3. | Create ConfigMap for connector configuration | ConfigMap with connector configuration is created | -| 4. | Deploy Kafka Connect with external configuration | Kafka Connect is deployed with proper configuration | -| 5. | Create necessary Role and RoleBinding for connector | Role and RoleBinding are created and applied | -| 6. | Deploy Kafka connector | Kafka connector is successfully deployed | -| 7. | Deploy Kafka clients | Kafka clients are deployed and ready | -| 8. | Send messages and verify they are written to file sink | Messages are successfully written to the specified file sink | +| 1. | Create broker and controller pools. | Resources are created and are in ready state. | +| 2. | Create Kafka cluster. | Kafka cluster is ready with 3 brokers. | +| 3. | Create ConfigMap for connector configuration. | ConfigMap with connector configuration is created. | +| 4. | Deploy Kafka Connect with external configuration. | Kafka Connect is deployed with proper configuration. | +| 5. | Create necessary Role and RoleBinding for connector. | Role and RoleBinding are created and applied. | +| 6. | Deploy Kafka connector. | Kafka connector is successfully deployed. | +| 7. | Deploy Kafka clients. 
| Kafka clients are deployed and ready. | +| 8. | Send messages and verify they are written to sink file. | Messages are successfully written to the specified sink file. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md index 9f221f85914..b4749caf7de 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -1,13 +1,13 @@ # KafkaNodePoolST -**Description:** This test suite verifies various functionalities of Kafka Node Pools in a Kafka cluster. +**Description:** This test suite verifies various functionalities of Kafka node pools in a Kafka cluster. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Ensure the environment is not using OLM or Helm and Kafka Node Pools are enabled | Environment is validated | -| 2. | Install the default cluster operator | Cluster operator is installed | +| 1. | Ensure the environment is not using OLM or Helm and Kafka node pools are enabled. | Environment is validated. | +| 2. | Install the default Cluster Operator. | Cluster operator is installed. | **Labels:** @@ -17,18 +17,18 @@ ## testKafkaManagementTransferToAndFromKafkaNodePool -**Description:** This test case verifies transfer of Kafka Cluster from and to management by KafkaNodePool, by creating corresponding Kafka and KafkaNodePool custom resources and manipulating according Kafka annotation. +**Description:** This test verifies Kafka Cluster migration to and from node pools, using the necessary Kafka and KafkaNodePool resources and annotations. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka with annotated to enable management by KafkaNodePool, and KafkaNodePool targeting given Kafka Cluster. | Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected. | +| 1. | Deploy a Kafka cluster with the annotation to enable node pool management, and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | | 2. | Modify KafkaNodePool by increasing number of Kafka Replicas. | Number of Kafka Pods is increased to match specification from KafkaNodePool. | | 3. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | -| 4. | Modify Kafka custom resource annotation strimzi.io/node-pool to disable management by KafkaNodePool. | StrimziPodSet is modified, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) are ignored. | +| 4. | Disable KafkaNodePool management in the Kafka custom resource using the node pool annotation. | StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored. | | 5. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | -| 6. | Modify Kafka custom resource annotation strimzi.io/node-pool to enable management by KafkaNodePool. | New StrimziPodSet is created, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) has priority over Kafka specification. | +| 6. | Enable node pool management in the Kafka custom resource using the node pool annotation. 
| New StrimziPodSet is created, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications. |
| 7. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. |

**Labels:**

@@ -38,16 +38,16 @@

## testKafkaNodePoolBrokerIdsManagementUsingAnnotations

-**Description:** This test case verifies the management of broker IDs in Kafka Node Pools using annotations.
+**Description:** This test case verifies the management of broker IDs in Kafka node pools using annotations.

**Steps:**

| Step | Action | Result |
| - | - | - |
-| 1. | Deploy a Kafka instance with annotations to manage Node Pools and Initial NodePool (Initial) to hold Topics and act as controller. | Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91. |
-| 2. | Deploy additional 2 NodePools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]). | NodePools are deployed, NodePool A contains ID 4, NodePool B contains IDs 6, 0. |
-| 3. | Annotate NodePool A 'next-node-ids' and NodePool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp. | NodePools are scaled, NodePool A contains IDs 4, 20, 21, 1. NodePool B contains ID 0. |
-| 4. | Annotate NodePool A 'remove-node-ids' and NodePool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp. | NodePools are scaled, NodePool A contains IDs 1, 4. NodePool B contains IDs 2, 3, 5. |
+| 1. | Deploy a Kafka instance with annotations to manage node pools and one initial node pool to hold topics and act as controller. | Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91. |
+| 2. | Deploy 2 additional node pools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]). | Node pools are deployed, node pool A contains ID 4, node pool B contains IDs 6, 0. |
+| 3. | Annotate node pool A 'next-node-ids' and node pool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp. | Node pools are scaled, node pool A contains IDs 4, 20, 21, 1. Node pool B contains ID 0. |
+| 4. | Annotate node pool A 'remove-node-ids' and node pool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp. | Node pools are scaled, node pool A contains IDs 1, 4. Node pool B contains IDs 2, 3, 5. |

**Labels:**

@@ -56,17 +56,17 @@

## testNodePoolsAdditionAndRemoval

-**Description:** This test case verifies the possibility of adding and removing Kafka Node Pools into an existing Kafka cluster.
+**Description:** This test case verifies the possibility of adding Kafka node pools to, and removing them from, an existing Kafka cluster.

**Steps:**

| Step | Action | Result |
| - | - | - |
-| 1. | Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools. | Kafka instance is deployed according to Kafka and KafkaNodePool custom resource. |
+| 1. | Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools. | Kafka instance is deployed according to Kafka and KafkaNodePool custom resource. |
| 2. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully, KafkaTopic created and cleaned as expected. |
| 3. | Add extra KafkaNodePool with broker role to the Kafka.
| KafkaNodePool is deployed and ready. | | 4. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully, KafkaTopic created and cleaned as expected. | -| 5. | Remove one of kafkaNodePool with broker role. | KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready. | +| 5. | Remove one kafkaNodePool with broker role. | KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready. | | 6. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully, KafkaTopic created and cleaned as expected. | **Labels:** @@ -76,15 +76,15 @@ ## testNodePoolsRolesChanging -**Description:** This test case verifies changing of roles in Kafka Node Pools. +**Description:** This test case verifies changing of roles in Kafka node pools. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools, both with mixed role, first one stable, second one which will be modified. | Kafka instance with initial Node Pools is deployed. | +| 1. | Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified. | Kafka instance with initial node pools is deployed. | | 2. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present. | KafkaTopic is created. | -| 3. | Annotate one of Node Pools to perform manual Rolling Update. | Rolling Update started. | +| 3. | Annotate one of node pools to perform manual Rolling Update. | Rolling Update started. | | 4. | Change role of Kafka Node Pool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update. | | 5. | Original Rolling Update finishes successfully. | Rolling Update is completed. | | 6. | Delete previously created KafkaTopic. | KafkaTopic is deleted and Node Pool role change is initiated. | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md index 2e60b529384..beb780975be 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md @@ -1,12 +1,12 @@ # KafkaST -**Description:** Suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters. +**Description:** Test suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy cluster operator across all namespaces, with custom configuration | Cluster operator is deployed | +| 1. | Deploy Cluster Operator across all namespaces, with custom configuration. | Cluster Operator is deployed. | **Labels:** @@ -40,7 +40,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Initialize test storage with current context | Test storage is initialized | +| 1. | Initialize test storage with current context. | Test storage is initialized. | | 2. 
| Create Kafka node pools | Kafka node pools are created and ready | | 3. | Deploy Kafka with a non-existing version | Kafka deployment with non-supported version begins | | 4. | Log Kafka deployment process | Log entry for Kafka deployment is created | @@ -60,9 +60,9 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration | Kafka and its components (ZooKeeper, Entity Operator) are deployed | -| 2. | For each of components (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables | Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification. | -| 3. | Wait for a time to observe that none of initiated components needed Rolling Update | All of Kafka components remained in stable state. | +| 1. | Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration. | Kafka and its components (ZooKeeper, Entity Operator) are deployed. | +| 2. | For each component (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables. | Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification. | +| 3. | Wait for a time to observe that no initiated components need rolling update. | All Kafka components remain in stable state. | **Labels:** @@ -77,7 +77,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka annotated to enable KRaft (and additionally annotated to enable management by KafkaNodePool due to default usage of NodePools), and KafkaNodePool targeting given Kafka Cluster. | Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected. | +| 1. | Deploy Kafka annotated to enable KRaft (and additionally annotated to enable node pool management), and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | | 2. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | | 3. | Trigger manual Rolling Update. | Rolling update is triggered and completed shortly after. | @@ -88,17 +88,17 @@ ## testKafkaJBODDeleteClaimsTrueFalse -**Description:** This test case verifies that Kafka with persistent storage, and JBOD storage, property 'delete claim' of JBOD storage. +**Description:** This test case verifies Kafka running with persistent JBOD storage, and configured with the `deleteClaim` storage property. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of these are configured to delete their Persistent Volume Claims on Kafka cluster un-provision. | Kafka is deployed, volumes are labeled and linked to Pods correctly. | -| 2. | Verify that labels in Persistent Volume Claims are set correctly. | Persistent Volume Claims do contain expected labels and values. | -| 3. | Modify Kafka Custom Resource, specifically 'delete claim' property of its first Kafka Volume. | Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator. | -| 4. | Delete Kafka cluster. 
| Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'delete claim' property set to true. |
-| 5. | Verify remaining Persistent Volume Claims. | Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'delete claim' set to true is still present. |
+| 1. | Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of which are configured to delete their Persistent Volume Claims on Kafka cluster un-provision. | Kafka is deployed, volumes are labeled and linked to Pods correctly. |
+| 2. | Verify that labels in Persistent Volume Claims are set correctly. | Persistent Volume Claims contain expected labels and values. |
+| 3. | Modify Kafka Custom Resource, specifically 'deleteClaim' property of its first Kafka Volume. | Kafka CR is successfully modified, annotation of the corresponding Persistent Volume Claim is changed afterwards by the Cluster Operator. |
+| 4. | Delete Kafka cluster. | Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'deleteClaim' property set to true. |
+| 5. | Verify remaining Persistent Volume Claims. | Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'deleteClaim' set to true is still present. |

**Labels:**

@@ -107,16 +107,16 @@

## testLabelsExistenceAndManipulation

-**Description:** This test case verifies the presence of expected Strimzi specific labels, also labels and annotations specified by user. Some of user-specified labels are later modified (new one is added, one is modified) which triggers rolling update after which all changes took place as expected.
+**Description:** This test case verifies the presence of expected Strimzi-specific labels, as well as labels and annotations specified by the user. Some user-specified labels are later modified (a new one is added, an existing one is modified), which triggers a rolling update after which all changes take place as expected.

**Steps:**

| Step | Action | Result |
| - | - | - |
-| 1. | Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotation in PVC metadata | Kafka is deployed with its default labels and all others specified by user. |
-| 2. | Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected | Producer and Consumer are able to produce and consume messages respectively. |
-| 3. | Modify configuration of Kafka CR with addition of new labels and modification of existing | Kafka is rolling and new labels are present in Kafka CR, and managed resources |
-| 4. | Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected | Producer and Consumer are able to produce and consume messages respectively. |
+| 1. | Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotation in PVC metadata. | Kafka is deployed with its default labels and all others specified by user. |
+| 2. | Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected. | Producer and Consumer are able to produce and consume messages respectively. |
+| 3. | Modify configuration of Kafka CR with addition of new labels and modification of existing. | Kafka is rolling and new labels are present in Kafka CR, and managed resources. |
+| 4.
| Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected. | Producer and Consumer are able to produce and consume messages respectively. |

**Labels:**

@@ -167,10 +167,10 @@

| Step | Action | Result |
| - | - | - |
-| 1. | Create Kafka without external listener | Kafka instance is created without an external listener |
-| 2. | Edit Kafka to include an external listener | External listener is correctly added to the Kafka instance |
-| 3. | Wait until the Kafka component has rolled | Kafka component rolls successfully with the new external listener |
-| 4. | Compare Kafka broker secrets before and after adding external listener | Secrets are different before and after adding the external listener |
+| 1. | Create Kafka without external listener. | Kafka instance is created without an external listener. |
+| 2. | Edit Kafka to include an external listener. | External listener is correctly added to the Kafka instance. |
+| 3. | Wait until the Kafka component has rolled. | Kafka component rolls successfully with the new external listener. |
+| 4. | Compare Kafka broker secrets before and after adding external listener. | Secrets are different before and after adding the external listener. |

**Labels:**

@@ -179,17 +179,17 @@

## testRemoveComponentsFromEntityOperator

-**Description:** This test case verifies the correct deployment of Entity Operator, i.e., including both User Operator and Topic Operator. Entity Operator is firstly modified to exclude User Operator, afterwards it is modified to default configuration, which includes User Operator. The next step is removal of Topic Operator itself and finally, also removing User Operator, with Topic Operator being already removed.
+**Description:** This test case verifies the correct deployment of the Entity Operator, including both the User Operator and Topic Operator. First, the Entity Operator is modified to exclude the User Operator. Then, it is restored to its default configuration, which includes the User Operator. Next, the Topic Operator is removed, followed by the User Operator, with the Topic Operator already excluded.

**Steps:**

| Step | Action | Result |
| - | - | - |
-| 1. | Deploy Kafka with Entity Operator set. | Kafka is deployed, and Entity Operator consist of both Topic and User Operators |
-| 2. | Remove User Operator from the Kafka specification | User Operator container is deleted |
-| 3. | Set User Operator back in the Kafka specification | User Operator container is recreated |
-| 4. | Remove Topic Operator from the Kafka specification | Topic Operator container is removed from Entity Operator |
-| 5. | Remove User Operator from the Kafka specification | Entity Operator Pod is removed, as there are no other containers present. |
+| 1. | Deploy Kafka with Entity Operator set. | Kafka is deployed, and Entity Operator consists of both Topic Operator and User Operator. |
+| 2. | Remove User Operator from the Kafka specification. | User Operator container is deleted. |
+| 3. | Set User Operator back in the Kafka specification. | User Operator container is recreated. |
+| 4. | Remove Topic Operator from the Kafka specification. | Topic Operator container is removed from Entity Operator. |
+| 5. | Remove User Operator from the Kafka specification. | Entity Operator Pod is removed, as there are no other containers present.
| **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md index 4255316d38b..194baf3fdd1 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md @@ -1,12 +1,12 @@ # KafkaVersionsST -**Description:** Test checking basic functionality for each supported Kafka version. Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly. +**Description:** Verifies the basic functionality for each supported Kafka version. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy cluster operator with default installation | Cluster operator is deployed | +| 1. | Deploy Cluster Operator with default installation. | Cluster Operator is deployed. | **Labels:** @@ -16,17 +16,17 @@ ## testKafkaWithVersion -**Description:** Test checking basic functionality for each supported Kafka version. Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly. +**Description:** Tests the basic functionality for each supported Kafka version, ensuring that deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners work correctly. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka cluster with specified version | Kafka cluster is deployed without any issue | -| 2. | Verify the Topic Operator creation | Topic Operator is working correctly | -| 3. | Verify the User Operator creation | User Operator is working correctly with SCRAM-SHA and ACLs | -| 4. | Send and receive messages via PLAIN with SCRAM-SHA | Messages are sent and received successfully | -| 5. | Send and receive messages via TLS | Messages are sent and received successfully | +| 1. | Deploy Kafka cluster with specified version. | Kafka cluster is deployed without any issue. | +| 2. | Verify the Topic Operator creation. | Topic Operator is working correctly. | +| 3. | Verify the User Operator creation. | User Operator is working correctly with SCRAM-SHA and ACLs. | +| 4. | Send and receive messages via PLAIN with SCRAM-SHA. | Messages are sent and received successfully. | +| 5. | Send and receive messages via TLS. | Messages are sent and received successfully. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md index 557f848691d..737c583758c 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md @@ -1,12 +1,12 @@ # QuotasST -**Description:** NOTE: STs in this class will not properly work on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. To properly run this suite, you should use cluster with proper storage. 
+**Description:** NOTE: STs in this class will not work properly on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. To properly run this suite, you should use cluster with proper storage. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy default cluster operator with the required configurations | Cluster operator is deployed | +| 1. | Deploy default cluster operator with the required configurations. | Cluster operator is deployed. | **Labels:** @@ -22,12 +22,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Assume the cluster is not Minikube or MicroShift | Cluster is appropriate for the test | -| 2. | Create necessary resources for Kafka and nodes | Resources are created and Kafka is set up with quotas plugin | -| 3. | Send messages without any user; observe quota enforcement | Producer stops after reaching the minimum available bytes | -| 4. | Check Kafka logs for quota enforcement message | Kafka logs contain the expected quota enforcement message | -| 5. | Send messages with excluded user and observe the behavior | Messages are sent successfully without hitting the quota | -| 6. | Clean up resources | Resources are deleted successfully | +| 1. | Assume the cluster is not Minikube or MicroShift. | Cluster is appropriate for the test. | +| 2. | Create necessary resources for Kafka and nodes. | Resources are created and Kafka is set up with quotas plugin. | +| 3. | Send messages without any user; observe quota enforcement. | Producer stops after reaching the minimum available bytes. | +| 4. | Check Kafka logs for quota enforcement message. | Kafka logs contain the expected quota enforcement message. | +| 5. | Send messages with excluded user and observe the behavior. | Messages are sent successfully without hitting the quota. | +| 6. | Clean up resources. | Resources are deleted successfully. | **Labels:** @@ -42,12 +42,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Set excluded principal | Principal is set | -| 2. | Create Kafka resources including node pools and persistent Kafka with quotas enabled | Kafka resources are created successfully with quotas setup | -| 3. | Create Kafka topic and user with SCRAM-SHA authentication | Kafka topic and SCRAM-SHA user are created successfully | -| 4. | Send messages with normal user | Messages are sent and duration is measured | -| 5. | Send messages with excluded user | Messages are sent and duration is measured | -| 6. | Assert that time taken for normal user is greater than for excluded user | Assertion is successful | +| 1. | Set excluded principal. | Principal is set. | +| 2. | Create Kafka resources including node pools and persistent Kafka with quotas enabled. | Kafka resources are created successfully with quotas setup. | +| 3. | Create Kafka topic and user with SCRAM-SHA authentication. | Kafka topic and SCRAM-SHA user are created successfully. | +| 4. | Send messages with normal user. | Messages are sent and duration is measured. | +| 5. | Send messages with excluded user. | Messages are sent and duration is measured. | +| 6. | Assert that time taken for normal user is greater than for excluded user. | Assertion is successful. 
| **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md index 886a1e32fe5..ca645249051 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md @@ -6,11 +6,11 @@ | Step | Action | Result | | - | - | - | -| 1. | Create test namespace | Namespace is created | -| 2. | Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build) | Kafka image is built | -| 3. | Deploy Minio in test namespace and init the client inside the Minio pod | Minio is deployed and client is initialized | -| 4. | Init bucket in Minio for purposes of these tests | Bucket is initialized in Minio | -| 5. | Deploy Strimzi Cluster Operator | Strimzi Cluster Operator is deployed | +| 1. | Create test namespace. | Namespace is created. | +| 2. | Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build). | Kafka image is built. | +| 3. | Deploy Minio in test namespace and init the client inside the Minio pod. | Minio is deployed and client is initialized. | +| 4. | Init bucket in Minio for purposes of these tests. | Bucket is initialized in Minio. | +| 5. | Deploy Cluster Operator. | Cluster Operator is deployed. | **Labels:** @@ -20,17 +20,17 @@ ## testTieredStorageWithAivenPlugin -**Description:** This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. The tests use Aiven Tiered Storage plugin - ... +**Description:** This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. The tests use Aiven Tiered Storage plugin (tiered-storage-for-apache-kafka). **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploys KafkaNodePool resource with Broker NodePool with PV of size 10Gi | KafkaNodePool resource is deployed successfully with specified configuration | -| 2. | Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll | Kafka resource is deployed successfully with Tiered Storage configuration | -| 3. | Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed for speedup the sync) | Topic is created successfully with Tiered Storage enabled and segment size of 10mb | -| 4. | Starts continuous producer to send data to Kafka | Continuous producer starts sending data to Kafka | -| 5. | Wait until Minio size is not empty (contains data from Kafka) | Minio contains data from Kafka | +| 1. | Deploys KafkaNodePool resource with PV of size 10Gi. | KafkaNodePool resource is deployed successfully with specified configuration. | +| 2. | Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll. | Kafka resource is deployed successfully with Tiered Storage configuration. | +| 3. | Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed to speed up the sync). | Topic is created successfully with Tiered Storage enabled and segment size of 10mb. | +| 4. | Starts continuous producer to send data to Kafka. | Continuous producer starts sending data to Kafka. | +| 5. | Wait until Minio size is not empty (contains data from Kafka). | Minio contains data from Kafka. 
| **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md index 306da359b9d..c300b7fe58b 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy the Cluster Operator | Cluster Operator is installed successfully | +| 1. | Deploy the Cluster Operator. | Cluster Operator is installed successfully. |
@@ -18,13 +18,13 @@ | Step | Action | Result | | - | - | - | -| 1. | Deep copy shard Kafka configuration | Configuration map is duplicated with deep copy | -| 2. | Create resources with wait | Resources are created and ready | -| 3. | Create scraper pod | Scraper pod is created | -| 4. | Retrieve and verify Kafka configurations from ConfigMaps | Configurations meet expected values | -| 5. | Retrieve Kafka broker configuration via CLI | Dynamic configurations are retrieved | -| 6. | Update Kafka configuration for unclean leader election | Configuration is updated and verified for dynamic property | -| 7. | Verify updated Kafka configurations | Updated configurations are persistent and correct | +| 1. | Deep copy shared Kafka configuration. | Configuration map is duplicated with deep copy. | +| 2. | Create resources with wait. | Resources are created and ready. | +| 3. | Create scraper pod. | Scraper pod is created. | +| 4. | Retrieve and verify Kafka configurations from ConfigMaps. | Configurations meet expected values. | +| 5. | Retrieve Kafka broker configuration via CLI. | Dynamic configurations are retrieved. | +| 6. | Update Kafka configuration for unclean leader election. | Configuration is updated and verified for dynamic property. | +| 7. | Verify updated Kafka configurations. | Updated configurations are persistent and correct. | **Labels:** @@ -59,15 +59,15 @@ | Step | Action | Result | | - | - | - | -| 1. | Setup initial Kafka cluster and resources | Kafka cluster and resources are successfully created | -| 2. | Create external Kafka clients and verify message production/consumption on plain listener | Messages are successfully produced and consumed using plain listener | -| 3. | Attempt to produce/consume messages using TLS listener before update | Exception is thrown because the listener is plain | -| 4. | Update Kafka cluster to use external TLS listener | Kafka cluster is updated and rolling restart occurs | -| 5. | Verify message production/consumption using TLS listener after update | Messages are successfully produced and consumed using TLS listener | -| 6. | Attempt to produce/consume messages using plain listener after TLS update | Exception is thrown because the listener is TLS | -| 7. | Revert Kafka cluster listener to plain | Kafka cluster listener is reverted and rolling restart occurs | -| 8. | Verify message production/consumption on plain listener after reverting | Messages are successfully produced and consumed using plain listener | -| 9. | Attempt to produce/consume messages using TLS listener after reverting | Exception is thrown because the listener is plain | +| 1. | Setup initial Kafka cluster and resources. | Kafka cluster and resources are successfully created. | +| 2. | Create external Kafka clients and verify message production/consumption on plain listener. | Messages are successfully produced and consumed using plain listener. | +| 3. | Attempt to produce/consume messages using TLS listener before update. | Exception is thrown because the listener is plain. | +| 4. | Update Kafka cluster to use external TLS listener. | Kafka cluster is updated and rolling restart occurs. | +| 5. | Verify message production/consumption using TLS listener after update. | Messages are successfully produced and consumed using TLS listener. | +| 6. | Attempt to produce/consume messages using plain listener after TLS update. | Exception is thrown because the listener is TLS. | +| 7. | Revert Kafka cluster listener to plain. 
| Kafka cluster listener is reverted and rolling restart occurs. | +| 8. | Verify message production/consumption on plain listener after reverting. | Messages are successfully produced and consumed using plain listener. | +| 9. | Attempt to produce/consume messages using TLS listener after reverting. | Exception is thrown because the listener is plain. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md index 28731b2c730..ab4dc47f2b3 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md @@ -6,9 +6,9 @@ | Step | Action | Result | | - | - | - | -| 1. | Run cluster operator installation | Cluster operator is installed | -| 2. | Deploy shared Kafka across all test cases | Shared Kafka is deployed | -| 3. | Deploy scraper pod | Scraper pod is deployed | +| 1. | Run Cluster Operator installation. | Cluster Operator is installed. | +| 2. | Deploy shared Kafka across all test cases. | Shared Kafka is deployed. | +| 3. | Deploy scraper pod. | Scraper pod is deployed. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md index 0442eb278b1..e8147445080 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Install the cluster operator with default settings | Cluster operator is installed successfully | +| 1. | Install the cluster operator with default settings. | Cluster operator is installed successfully. | **Labels:** @@ -384,11 +384,11 @@ | Step | Action | Result | | - | - | - | -| 1. | Create resource with Kafka broker pool and controller pool | Resources with Kafka pools are created successfully | -| 2. | Create Kafka cluster with NodePort and TLS listeners | Kafka cluster is set up with the specified listeners | -| 3. | Create ExternalKafkaClient and verify message production and consumption | Messages are produced and consumed successfully | -| 4. | Check Kafka status for proper listener addresses | Listener addresses in Kafka status are validated successfully | -| 5. | Check ClusterRoleBinding annotations and labels in Kafka cluster | Annotations and labels match the expected values | +| 1. | Create resource with Kafka broker pool and controller pool. | Resources with Kafka pools are created successfully. | +| 2. | Create Kafka cluster with NodePort and TLS listeners. | Kafka cluster is set up with the specified listeners. | +| 3. | Create ExternalKafkaClient and verify message production and consumption. | Messages are produced and consumed successfully. | +| 4. | Check Kafka status for proper listener addresses. | Listener addresses in Kafka status are validated successfully. | +| 5. | Check ClusterRoleBinding annotations and labels in Kafka cluster. | Annotations and labels match the expected values. | **Labels:** @@ -403,7 +403,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka broker and controller node pools | Broker and controller node pools are created | +| 1. 
| Create Kafka broker and controller node pools. | Broker and controller node pools are created | | 2. | Deploy Kafka cluster with NodePort listener and TLS enabled | Kafka cluster is deployed with NodePort listener and TLS | | 3. | Create a Kafka topic | Kafka topic is created | | 4. | Create a Kafka user with TLS authentication | Kafka user with TLS authentication is created | @@ -459,10 +459,10 @@ | Step | Action | Result | | - | - | - | -| 1. | Create a Kafka cluster with broker and controller node pools | Kafka cluster is created with node pools | -| 2. | Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication | Kafka cluster with custom listener is ready | -| 3. | Create a Kafka topic and SCRAM-SHA user | Kafka topic and user are created | -| 4. | Transmit messages over TLS using SCRAM-SHA authentication | Messages are transmitted successfully | +| 1. | Create a Kafka cluster with broker and controller node pools. | Kafka cluster is created with node pools. | +| 2. | Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication. | Kafka cluster with custom listener is ready. | +| 3. | Create a Kafka topic and SCRAM-SHA user. | Kafka topic and user are created. | +| 4. | Transmit messages over TLS using SCRAM-SHA authentication. | Messages are transmitted successfully. | **Labels:** @@ -477,10 +477,10 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka resources with wait | Kafka broker, controller, and topic are created | -| 2. | Log transmission message | Transmission message is logged | -| 3. | Produce and consume messages with plain clients | Messages are successfully produced and consumed | -| 4. | Validate Kafka service discovery annotation | The discovery annotation is validated successfully | +| 1. | Create Kafka resources with wait. | Kafka broker, controller, and topic are created. | +| 2. | Log transmission message. | Transmission message is logged. | +| 3. | Produce and consume messages with plain clients. | Messages are successfully produced and consumed. | +| 4. | Validate Kafka service discovery annotation. | The discovery annotation is validated successfully. | **Labels:** @@ -495,12 +495,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka brokers and controllers | Kafka brokers and controllers are created | -| 2. | Enable Kafka with plain listener disabled and scram sha auth | Kafka instance with scram sha auth is enabled on a specified listener | -| 3. | Set up topic and user | Kafka topic and Kafka user are set up with scram sha auth credentials | -| 4. | Check logs in broker pod for authentication | Logs show that scram sha authentication succeeded | -| 5. | Send messages over plain transport using scram sha authentication | Messages are successfully sent over plain transport using scram sha auth | -| 6. | Verify service discovery annotation | Service discovery annotation is checked and validated | +| 1. | Create Kafka brokers and controllers. | Kafka brokers and controllers are created. | +| 2. | Enable Kafka with plain listener disabled and scram sha auth. | Kafka instance with scram sha auth is enabled on a specified listener. | +| 3. | Set up topic and user. | Kafka topic and Kafka user are set up with scram sha auth credentials. | +| 4. | Check logs in broker pod for authentication. | Logs show that scram sha authentication succeeded. | +| 5. | Send messages over plain transport using scram sha authentication. | Messages are successfully sent over plain transport using scram sha auth. 
| +| 6. | Verify service discovery annotation. | Service discovery annotation is checked and validated. | **Labels:** @@ -515,12 +515,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka node pool resources | Persistent storage node pools are created | -| 2. | Disable plain listener and enable tls listener in Kafka resource | Kafka with plain listener disabled and tls listener enabled is created | -| 3. | Create Kafka topic and user | Kafka topic and tls user are created | -| 4. | Configure and deploy Kafka clients | Kafka clients producer and consumer with tls are deployed | -| 5. | Wait for clients to successfully send and receive messages | Clients successfully send and receive messages over tls | -| 6. | Assert that the service discovery contains expected info | Service discovery matches expected info | +| 1. | Create Kafka node pool resources. | Persistent storage node pools are created. | +| 2. | Disable plain listener and enable tls listener in Kafka resource. | Kafka with plain listener disabled and tls listener enabled is created. | +| 3. | Create Kafka topic and user. | Kafka topic and tls user are created. | +| 4. | Configure and deploy Kafka clients. | Kafka clients producer and consumer with tls are deployed. | +| 5. | Wait for clients to successfully send and receive messages. | Clients successfully send and receive messages over tls. | +| 6. | Assert that the service discovery contains expected info. | Service discovery matches expected info. | **Labels:** @@ -535,12 +535,12 @@ | Step | Action | Result | | - | - | - | -| 1. | Create resources for Kafka node pools | Kafka node pools are created | -| 2. | Create Kafka cluster with SCRAM-SHA-512 authentication | Kafka cluster is created with SCRAM-SHA authentication | -| 3. | Create Kafka topic and user | Kafka topic and user are created | -| 4. | Transmit messages over TLS using SCRAM-SHA | Messages are successfully transmitted | -| 5. | Check if generated password has the expected length | Password length is as expected | -| 6. | Verify Kafka service discovery annotation | Service discovery annotation is as expected | +| 1. | Create resources for Kafka node pools. | Kafka node pools are created. | +| 2. | Create Kafka cluster with SCRAM-SHA-512 authentication. | Kafka cluster is created with SCRAM-SHA authentication. | +| 3. | Create Kafka topic and user. | Kafka topic and user are created. | +| 4. | Transmit messages over TLS using SCRAM-SHA. | Messages are successfully transmitted. | +| 5. | Check if generated password has the expected length. | Password length is as expected. | +| 6. | Verify Kafka service discovery annotation. | Service discovery annotation is as expected. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md index 9423eef096c..db06be6c04a 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md @@ -16,9 +16,9 @@ | Step | Action | Result | | - | - | - | -| 1. | Retrieve different types of Kafka listeners | Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved | -| 2. | Combine all different listener lists | A combined list of all Kafka listener types is created | -| 3. 
| Run listeners test with combined listener list | Listeners test runs with all types of Kafka listeners in the combined list | +| 1. | Retrieve different types of Kafka listeners. | Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved. | +| 2. | Combine all different listener lists. | A combined list of all Kafka listener types is created. | +| 3. | Run listeners test with combined listener list. | Listeners test runs with all types of Kafka listeners in the combined list. | **Labels:** @@ -33,9 +33,9 @@ | Step | Action | Result | | - | - | - | -| 1. | Check if the environment supports cluster-wide NodePort rights | Test is skipped if the environment is not suitable | -| 2. | Retrieve and combine internal and NodePort listeners | Listeners are successfully retrieved and combined | -| 3. | Run listeners test with combined listeners | Listeners test is executed successfully | +| 1. | Check if the environment supports cluster-wide NodePort rights. | Test is skipped if the environment is not suitable. | +| 2. | Retrieve and combine internal and NodePort listeners. | Listeners are successfully retrieved and combined. | +| 3. | Run listeners test with combined listeners. | Listeners test is executed successfully. | **Labels:** @@ -50,10 +50,10 @@ | Step | Action | Result | | - | - | - | -| 1. | Retrieve route listeners | Route listeners are retrieved from test cases | -| 2. | Retrieve nodeport listeners | Nodeport listeners are retrieved from test cases | -| 3. | Combine route and nodeport listeners | Multiple different listeners list is populated | -| 4. | Run listeners test | Listeners test runs using the combined list | +| 1. | Retrieve route listeners. | Route listeners are retrieved from test cases. | +| 2. | Retrieve nodeport listeners. | Nodeport listeners are retrieved from test cases. | +| 3. | Combine route and nodeport listeners. | Multiple different listeners list is populated. | +| 4. | Run listeners test. | Listeners test runs using the combined list. | **Labels:** @@ -68,7 +68,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Run the internal Kafka listeners test | Listeners test runs successfully on the specified cluster | +| 1. | Run the internal Kafka listeners test. | Listeners test runs successfully on the specified cluster. | **Labels:** @@ -83,8 +83,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Run listeners test with LOADBALANCER type | Listeners test executes successfully with load balancers | -| 2. | Validate the results | Results match the expected outcomes for multiple load balancers | +| 1. | Run listeners test with LOADBALANCER type. | Listeners test executes successfully with load balancers. | +| 2. | Validate the results. | Results match the expected outcomes for multiple load balancers. | **Labels:** @@ -99,7 +99,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Execute listener tests with NodePort configuration | Listener tests run without issues using NodePort | +| 1. | Execute listener tests with NodePort configuration. | Listener tests run without issues using NodePort. | **Labels:** @@ -114,8 +114,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Retrieve test cases for Kafka Listener Type ROUTE | Test cases for ROUTE are retrieved | -| 2. | Run listener tests using the retrieved test cases and cluster name | Listener tests run successfully with no errors | +| 1. | Retrieve test cases for Kafka Listener Type ROUTE. | Test cases for ROUTE are retrieved. | +| 2. 
| Run listener tests using the retrieved test cases and cluster name. | Listener tests run successfully with no errors. | **Labels:** diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java index 29a733b4087..c38c70638cc 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeCorsST.java @@ -52,8 +52,8 @@ @SuiteDoc( description = @Desc("Test suite for HTTP Bridge CORS functionality, focusing on verifying correct handling of allowed and forbidden origins."), beforeTestSteps = { - @Step(value = "Set up Kafka Bridge and its configuration including CORS settings", expected = "Kafka Bridge is set up with the correct configuration"), - @Step(value = "Deploy required Kafka resources and scraper pod", expected = "Kafka resources and scraper pod are deployed and running") + @Step(value = "Set up Kafka Bridge and its configuration including CORS settings.", expected = "Kafka Bridge is set up with the correct configuration."), + @Step(value = "Deploy required Kafka resources and scraper pod.", expected = "Kafka resources and scraper pod are deployed and running.") }, labels = { @Label(TestDocsLabels.BRIDGE) @@ -72,12 +72,12 @@ public class HttpBridgeCorsST extends AbstractST { @TestDoc( description = @Desc("This test checks if CORS handling for allowed origin works correctly in the Kafka Bridge."), steps = { - @Step(value = "Set up the Kafka Bridge user and configuration", expected = "Kafka Bridge user and configuration are set up"), - @Step(value = "Construct the request URL and headers", expected = "URL and headers are constructed properly"), - @Step(value = "Send OPTIONS request to Kafka Bridge and capture the response", expected = "Response is captured from Bridge"), - @Step(value = "Validate the response contains expected status codes and headers", expected = "Response has correct status codes and headers for allowed origin"), - @Step(value = "Send GET request to Kafka Bridge and capture the response", expected = "Response is captured from Bridge for GET request"), - @Step(value = "Check if the GET request response is '404 Not Found'", expected = "Response for GET request is 404 Not Found") + @Step(value = "Set up the Kafka Bridge user and configuration.", expected = "Kafka Bridge user and configuration are set up."), + @Step(value = "Construct the request URL and headers.", expected = "URL and headers are constructed properly."), + @Step(value = "Send OPTIONS request to Kafka Bridge and capture the response.", expected = "Response is captured from Bridge."), + @Step(value = "Validate the response contains expected status codes and headers.", expected = "Response has correct status codes and headers for allowed origin."), + @Step(value = "Send GET request to Kafka Bridge and capture the response.", expected = "Response is captured from Bridge for GET request."), + @Step(value = "Check if the GET request response is '404 Not Found'.", expected = "Response for GET request is 404 Not Found.") }, labels = { @Label(TestDocsLabels.BRIDGE) @@ -126,13 +126,13 @@ void testCorsOriginAllowed() { @TestDoc( description = @Desc("Test ensuring that CORS (Cross-Origin Resource Sharing) requests with forbidden origins are correctly rejected by the Bridge."), steps = { - @Step(value = "Create Kafka Bridge user and consumer group", expected = "Kafka Bridge user and consumer group are created 
successfully"), - @Step(value = "Set up headers with forbidden origin and pre-flight HTTP OPTIONS method", expected = "Headers and method are set correctly"), - @Step(value = "Send HTTP OPTIONS request to the Bridge", expected = "HTTP OPTIONS request is sent to the Bridge and a response is received"), - @Step(value = "Verify the response contains '403' and 'CORS Rejected - Invalid origin'", expected = "Response indicates the CORS request is rejected"), - @Step(value = "Remove 'Access-Control-Request-Method' from headers and set HTTP POST method", expected = "Headers are updated and HTTP method is set correctly"), - @Step(value = "Send HTTP POST request to the Bridge", expected = "HTTP POST request is sent to the Bridge and a response is received"), - @Step(value = "Verify the response contains '403' and 'CORS Rejected - Invalid origin'", expected = "Response indicates the CORS request is rejected") + @Step(value = "Create Kafka Bridge user and consumer group.", expected = "Kafka Bridge user and consumer group are created successfully."), + @Step(value = "Set up headers with forbidden origin and pre-flight HTTP OPTIONS method.", expected = "Headers and method are set correctly."), + @Step(value = "Send HTTP OPTIONS request to the Bridge.", expected = "HTTP OPTIONS request is sent to the Bridge and a response is received."), + @Step(value = "Verify the response contains '403' and 'CORS Rejected - Invalid origin'.", expected = "Response indicates the CORS request is rejected."), + @Step(value = "Remove 'Access-Control-Request-Method' from headers and set HTTP POST method.", expected = "Headers are updated and HTTP method is set correctly."), + @Step(value = "Send HTTP POST request to the Bridge.", expected = "HTTP POST request is sent to the Bridge and a response is received."), + @Step(value = "Verify the response contains '403' and 'CORS Rejected - Invalid origin'.", expected = "Response indicates the CORS request is rejected.") }, labels = { @Label(TestDocsLabels.BRIDGE) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java index 6437fdd15a4..00665683932 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java @@ -70,7 +70,7 @@ @SuiteDoc( description = @Desc("Test suite for various Kafka Bridge operations."), beforeTestSteps = { - @Step(value = "Initialize Test Storage and deploy Kafka and Kafka Bridge", expected = "Kafka and Kafka Bridge are deployed with necessary configuration") + @Step(value = "Initialize Test Storage and deploy Kafka and Kafka Bridge.", expected = "Kafka and Kafka Bridge are deployed with necessary configuration.") }, labels = { @Label(TestDocsLabels.BRIDGE), @@ -85,15 +85,15 @@ class HttpBridgeST extends AbstractST { @TestDoc( description = @Desc("Test validating that sending a simple message through Kafka Bridge works correctly and checks labels."), steps = { - @Step(value = "Initialize test storage", expected = "Test storage is initialized with necessary context"), - @Step(value = "Create a Kafka Bridge client job", expected = "Kafka Bridge client job is configured and instantiated"), - @Step(value = "Create Kafka topic", expected = "Kafka topic is successfully created"), - @Step(value = "Start Kafka Bridge producer", expected = "Kafka Bridge producer successfully begins sending messages"), - @Step(value = "Wait for producer success", expected = "All 
messages are sent successfully"), - @Step(value = "Start Kafka consumer", expected = "Kafka consumer is instantiated and starts consuming messages"), - @Step(value = "Wait for consumer success", expected = "All messages are consumed successfully"), - @Step(value = "Verify Kafka Bridge pod labels", expected = "Labels for Kafka Bridge pods are correctly set and verified"), - @Step(value = "Verify Kafka Bridge service labels", expected = "Labels for Kafka Bridge service are correctly set and verified") + @Step(value = "Initialize test storage.", expected = "Test storage is initialized with necessary context."), + @Step(value = "Create a Kafka Bridge client job.", expected = "Kafka Bridge client job is configured and instantiated."), + @Step(value = "Create Kafka topic.", expected = "Kafka topic is successfully created."), + @Step(value = "Start Kafka Bridge producer.", expected = "Kafka Bridge producer successfully begins sending messages."), + @Step(value = "Wait for producer success.", expected = "All messages are sent successfully."), + @Step(value = "Start Kafka consumer.", expected = "Kafka consumer is instantiated and starts consuming messages."), + @Step(value = "Wait for consumer success.", expected = "All messages are consumed successfully."), + @Step(value = "Verify Kafka Bridge pod labels.", expected = "Labels for Kafka Bridge pods are correctly set and verified."), + @Step(value = "Verify Kafka Bridge service labels.", expected = "Labels for Kafka Bridge service are correctly set and verified.") }, labels = { @Label(TestDocsLabels.BRIDGE), @@ -133,11 +133,11 @@ void testSendSimpleMessage() { @TestDoc( description = @Desc("Test verifying that a simple message can be received using Kafka Bridge."), steps = { - @Step(value = "Initialize the test storage", expected = "TestStorage instance is initialized"), - @Step(value = "Create Kafka topic resource", expected = "Kafka topic resource is created with specified configurations"), - @Step(value = "Setup and deploy Kafka Bridge consumer client", expected = "Kafka Bridge consumer client is set up and started receiving messages"), - @Step(value = "Send messages using Kafka producer", expected = "Messages are sent to Kafka successfully"), - @Step(value = "Verify message reception", expected = "All messages are received by Kafka Bridge consumer client") + @Step(value = "Initialize the test storage.", expected = "TestStorage instance is initialized."), + @Step(value = "Create Kafka topic resource.", expected = "Kafka topic resource is created with specified configurations."), + @Step(value = "Setup and deploy Kafka Bridge consumer client.", expected = "Kafka Bridge consumer client is set up and started receiving messages."), + @Step(value = "Send messages using Kafka producer.", expected = "Messages are sent to Kafka successfully."), + @Step(value = "Verify message reception.", expected = "All messages are received by Kafka Bridge consumer client.") }, labels = { @Label(TestDocsLabels.BRIDGE), @@ -174,12 +174,12 @@ void testReceiveSimpleMessage() { @TestDoc( description = @Desc("Test that validates the creation, update, and verification of a Kafka Bridge with specific initial and updated configurations."), steps = { - @Step(value = "Create a Kafka Bridge resource with initial configuration", expected = "Kafka Bridge is created and deployed with the specified initial configuration"), - @Step(value = "Remove an environment variable that is in use", expected = "Environment variable TEST_ENV_1 is removed from the initial configuration"), - 
@Step(value = "Verify initial probe values and environment variables", expected = "The probe values and environment variables match the initial configuration"), - @Step(value = "Update Kafka Bridge resource with new configuration", expected = "Kafka Bridge is updated and redeployed with the new configuration"), - @Step(value = "Verify updated probe values and environment variables", expected = "The probe values and environment variables match the updated configuration"), - @Step(value = "Verify Kafka Bridge configurations for producer and consumer", expected = "Producer and consumer configurations match the updated settings") + @Step(value = "Create a Kafka Bridge resource with initial configuration.", expected = "Kafka Bridge is created and deployed with the specified initial configuration."), + @Step(value = "Remove an environment variable that is in use.", expected = "Environment variable TEST_ENV_1 is removed from the initial configuration."), + @Step(value = "Verify initial probe values and environment variables.", expected = "The probe values and environment variables match the initial configuration."), + @Step(value = "Update Kafka Bridge resource with new configuration.", expected = "Kafka Bridge is updated and redeployed with the new configuration."), + @Step(value = "Verify updated probe values and environment variables.", expected = "The probe values and environment variables match the updated configuration."), + @Step(value = "Verify Kafka Bridge configurations for producer and consumer.", expected = "Producer and consumer configurations match the updated settings.") }, labels = { @Label(TestDocsLabels.BRIDGE) @@ -286,10 +286,10 @@ void testCustomAndUpdatedValues() { @TestDoc( description = @Desc("Test verifying the presence and correctness of the discovery annotation in the Kafka Bridge service."), steps = { - @Step(value = "Retrieve the Kafka Bridge service using kubeClient", expected = "Kafka Bridge service instance is obtained"), - @Step(value = "Extract the discovery annotation from the service metadata", expected = "The discovery annotation is retrieved as a string"), - @Step(value = "Convert the discovery annotation to a JsonArray", expected = "JsonArray representation of the discovery annotation is created"), - @Step(value = "Validate the content of the JsonArray against expected values", expected = "The JsonArray matches the expected service discovery information") + @Step(value = "Retrieve the Kafka Bridge service using kubeClient.", expected = "Kafka Bridge service instance is obtained."), + @Step(value = "Extract the discovery annotation from the service metadata.", expected = "The discovery annotation is retrieved as a string."), + @Step(value = "Convert the discovery annotation to a JsonArray.", expected = "JsonArray representation of the discovery annotation is created."), + @Step(value = "Validate the content of the JsonArray against expected values.", expected = "The JsonArray matches the expected service discovery information.") }, labels = { @Label("service_discovery_verification"), @@ -307,10 +307,10 @@ void testDiscoveryAnnotation() { @TestDoc( description = @Desc("Test that scales a KafkaBridge instance to zero replicas and verifies that it is properly handled."), steps = { - @Step(value = "Create a KafkaBridge resource and wait for it to be ready", expected = "KafkaBridge resource is created and ready with 1 replica"), - @Step(value = "Fetch the current number of KafkaBridge pods", expected = "There should be exactly 1 KafkaBridge pod initially"), - 
@Step(value = "Scale KafkaBridge to zero replicas", expected = "Scaling action is acknowledged"), - @Step(value = "Wait for KafkaBridge to scale down to zero replicas", expected = "KafkaBridge scales down to zero replicas correctly"), + @Step(value = "Create a KafkaBridge resource and wait for it to be ready.", expected = "KafkaBridge resource is created and ready with 1 replica."), + @Step(value = "Fetch the current number of KafkaBridge pods.", expected = "There should be exactly 1 KafkaBridge pod initially."), + @Step(value = "Scale KafkaBridge to zero replicas.", expected = "Scaling action is acknowledged."), + @Step(value = "Wait for KafkaBridge to scale down to zero replicas.", expected = "KafkaBridge scales down to zero replicas correctly."), @Step(value = "Check the number of KafkaBridge pods after scaling", expected = "No KafkaBridge pods should be running"), @Step(value = "Verify the status of KafkaBridge", expected = "KafkaBridge status should indicate it is ready with zero replicas") }, diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java index e81f7880568..4bce9bc141c 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java @@ -47,12 +47,12 @@ @SuiteDoc( description = @Desc("Test suite for validating Kafka Bridge functionality with TLS and SCRAM-SHA authentication"), beforeTestSteps = { - @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created"), - @Step(value = "Create BridgeClients instance", expected = "BridgeClients instance is created"), - @Step(value = "Deploy Kafka and KafkaBridge", expected = "Kafka and KafkaBridge are deployed successfully"), - @Step(value = "Create Kafka topic", expected = "Kafka topic is created with the given configuration"), - @Step(value = "Create Kafka user with SCRAM-SHA authentication", expected = "Kafka user is created and configured with SCRAM-SHA authentication"), - @Step(value = "Deploy HTTP bridge", expected = "HTTP bridge is deployed") + @Step(value = "Create TestStorage instance.", expected = "TestStorage instance is created."), + @Step(value = "Create BridgeClients instance.", expected = "BridgeClients instance is created."), + @Step(value = "Deploy Kafka and KafkaBridge.", expected = "Kafka and KafkaBridge are deployed successfully."), + @Step(value = "Create Kafka topic.", expected = "Kafka topic is created with the given configuration."), + @Step(value = "Create Kafka user with SCRAM-SHA authentication.", expected = "Kafka user is created and configured with SCRAM-SHA authentication."), + @Step(value = "Deploy HTTP bridge.", expected = "HTTP bridge is deployed.") }, afterTestSteps = { @@ -70,13 +70,13 @@ class HttpBridgeScramShaST extends AbstractST { @TestDoc( description = @Desc("Test ensuring that sending a simple message using TLS and SCRAM-SHA authentication via Kafka Bridge works as expected."), steps = { - @Step(value = "Create TestStorage and BridgeClients objects", expected = "Instances of TestStorage and BridgeClients are created"), - @Step(value = "Create topic using the resource manager", expected = "Topic is created successfully with the specified configuration"), - @Step(value = "Start producing messages via Kafka Bridge", expected = "Messages are produced successfully to the topic"), - @Step(value = "Wait for producer success", expected = 
"Producer finishes sending messages without errors"), - @Step(value = "Create KafkaClients and configure with TLS and SCRAM-SHA", expected = "Kafka client is configured with appropriate security settings"), - @Step(value = "Start consuming messages via Kafka client", expected = "Messages are consumed successfully from the topic"), - @Step(value = "Wait for consumer success", expected = "Consumer finishes receiving messages without errors") + @Step(value = "Create TestStorage and BridgeClients objects.", expected = "Instances of TestStorage and BridgeClients are created."), + @Step(value = "Create topic using the resource manager.", expected = "Topic is created successfully with the specified configuration."), + @Step(value = "Start producing messages via Kafka Bridge.", expected = "Messages are produced successfully to the topic."), + @Step(value = "Wait for producer success.", expected = "Producer finishes sending messages without errors."), + @Step(value = "Create KafkaClients and configure with TLS and SCRAM-SHA.", expected = "Kafka client is configured with appropriate security settings."), + @Step(value = "Start consuming messages via Kafka client.", expected = "Messages are consumed successfully from the topic."), + @Step(value = "Wait for consumer success.", expected = "Consumer finishes receiving messages without errors.") }, labels = { @Label(TestDocsLabels.BRIDGE) @@ -108,11 +108,11 @@ void testSendSimpleMessageTlsScramSha() { @TestDoc( description = @Desc("Test to check the reception of a simple message via Kafka Bridge using TLS and SCRAM-SHA encryption."), steps = { - @Step(value = "Initialize TestStorage and BridgeClientsBuilder instances", expected = "Instances are successfully initialized"), - @Step(value = "Create Kafka topic using ResourceManager", expected = "Kafka topic is created and available"), - @Step(value = "Create Bridge consumer using ResourceManager", expected = "Bridge consumer is successfully created"), - @Step(value = "Send messages to Kafka using KafkaClients", expected = "Messages are successfully sent to the Kafka topic"), - @Step(value = "Wait for clients' success validation", expected = "Messages are successfully consumed from the Kafka topic") + @Step(value = "Initialize TestStorage and BridgeClientsBuilder instances.", expected = "Instances are successfully initialized."), + @Step(value = "Create Kafka topic using ResourceManager.", expected = "Kafka topic is created and available."), + @Step(value = "Create Bridge consumer using ResourceManager.", expected = "Bridge consumer is successfully created."), + @Step(value = "Send messages to Kafka using KafkaClients.", expected = "Messages are successfully sent to the Kafka topic."), + @Step(value = "Wait for clients' success validation.", expected = "Messages are successfully consumed from the Kafka topic.") }, labels = { @Label(TestDocsLabels.BRIDGE) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java index f722c0f92d2..682fccd40d9 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java @@ -55,10 +55,10 @@ @SuiteDoc( description = @Desc("Test suite for verifying TLS functionalities in the HTTP Bridge."), beforeTestSteps = { - @Step(value = "Initialize test storage and context", expected = "Test storage and context are initialized successfully"), - @Step(value = "Deploy Kafka and 
KafkaBridge", expected = "Kafka and KafkaBridge are deployed and running"), - @Step(value = "Create Kafka user with TLS configuration", expected = "Kafka user with TLS configuration is created"), - @Step(value = "Deploy HTTP bridge with TLS configuration", expected = "HTTP bridge is deployed with TLS configuration") + @Step(value = "Initialize test storage and context.", expected = "Test storage and context are initialized successfully."), + @Step(value = "Deploy Kafka and KafkaBridge.", expected = "Kafka and KafkaBridge are deployed and running."), + @Step(value = "Create Kafka user with TLS configuration.", expected = "Kafka user with TLS configuration is created."), + @Step(value = "Deploy HTTP bridge with TLS configuration.", expected = "HTTP bridge is deployed with TLS configuration.") }, labels = { @Label(TestDocsLabels.BRIDGE) @@ -73,12 +73,12 @@ class HttpBridgeTlsST extends AbstractST { @TestDoc( description = @Desc("Test to verify that sending a simple message using TLS works correctly."), steps = { - @Step(value = "Initialize TestStorage and BridgeClients with TLS configuration", expected = "TestStorage and BridgeClients are initialized with TLS configuration"), - @Step(value = "Create Kafka topic using resource manager", expected = "Kafka topic is successfully created"), - @Step(value = "Create Kafka Bridge Client job for producing messages", expected = "Kafka Bridge Client job is created and produces messages successfully"), - @Step(value = "Verify that the producer successfully sends messages", expected = "Producer successfully sends the expected number of messages"), - @Step(value = "Create Kafka client consumer with TLS configuration", expected = "Kafka client consumer is created with TLS configuration"), - @Step(value = "Verify that the consumer successfully receives messages", expected = "Consumer successfully receives the expected number of messages") + @Step(value = "Initialize TestStorage and BridgeClients with TLS configuration.", expected = "TestStorage and BridgeClients are initialized with TLS configuration."), + @Step(value = "Create Kafka topic using resource manager.", expected = "Kafka topic is successfully created."), + @Step(value = "Create Kafka Bridge Client job for producing messages.", expected = "Kafka Bridge Client job is created and produces messages successfully."), + @Step(value = "Verify that the producer successfully sends messages.", expected = "Producer successfully sends the expected number of messages."), + @Step(value = "Create Kafka client consumer with TLS configuration.", expected = "Kafka client consumer is created with TLS configuration."), + @Step(value = "Verify that the consumer successfully receives messages.", expected = "Consumer successfully receives the expected number of messages.") }, labels = { @Label(TestDocsLabels.BRIDGE) @@ -109,13 +109,13 @@ void testSendSimpleMessageTls() { @TestDoc( description = @Desc("Test to verify that a simple message can be received using TLS in a parallel environment."), steps = { - @Step(value = "Initialize the test storage instance", expected = "TestStorage object is instantiated with the test context."), - @Step(value = "Configure Kafka Bridge client for consumption", expected = "Kafka Bridge client is configured with topic and consumer names."), - @Step(value = "Create Kafka topic with provided configurations", expected = "Kafka topic resource is created and available."), - @Step(value = "Deploy the Kafka Bridge consumer", expected = "Kafka Bridge consumer starts successfully and is ready to 
consume messages."), - @Step(value = "Initialize TLS Kafka client for message production", expected = "TLS Kafka client is configured and initialized."), - @Step(value = "Deploy the Kafka producer TLS client", expected = "TLS Kafka producer client starts successfully and begins sending messages."), - @Step(value = "Verify message consumption", expected = "Messages are successfully consumed by the Kafka Bridge consumer.") + @Step(value = "Initialize the test storage instance.", expected = "TestStorage object is instantiated with the test context."), + @Step(value = "Configure Kafka Bridge client for consumption.", expected = "Kafka Bridge client is configured with topic and consumer names."), + @Step(value = "Create Kafka topic with provided configurations.", expected = "Kafka topic resource is created and available."), + @Step(value = "Deploy the Kafka Bridge consumer.", expected = "Kafka Bridge consumer starts successfully and is ready to consume messages."), + @Step(value = "Initialize TLS Kafka client for message production.", expected = "TLS Kafka client is configured and initialized."), + @Step(value = "Deploy the Kafka producer TLS client.", expected = "TLS Kafka producer client starts successfully and begins sending messages."), + @Step(value = "Verify message consumption.", expected = "Messages are successfully consumed by the Kafka Bridge consumer.") }, labels = { @Label(TestDocsLabels.BRIDGE) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java index ccb037d70b2..a0f3553af12 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java @@ -158,14 +158,14 @@ class ConnectBuilderST extends AbstractST { @TestDoc( description = @Desc("Test that ensures Kafka Connect build fails with wrong artifact checksum and recovers with correct checksum."), steps = { - @Step(value = "Initialize TestStorage and get test image name", expected = "TestStorage instance is created and the image name for the test case is retrieved"), - @Step(value = "Create a Plugin with wrong checksum and build Kafka Connect resource with it", expected = "Kafka Connect resource is created but the build fails due to wrong checksum"), - @Step(value = "Deploy Scraper pod with specific configurations", expected = "Kafka Scraper pod are successfully deployed"), - @Step(value = "Wait for Kafka Connect status to indicate build failure", expected = "Kafka Connect status contains message about build failure"), - @Step(value = "Deploy network policies for Kafka Connect", expected = "Network policies are successfully deployed for Kafka Connect"), - @Step(value = "Replace the plugin checksum with the correct one and update Kafka Connect resource", expected = "Kafka Connect resource is updated with the correct checksum"), - @Step(value = "Wait for Kafka Connect to be ready", expected = "Kafka Connect becomes ready"), - @Step(value = "Verify that EchoSink KafkaConnector is available in Kafka Connect API", expected = "EchoSink KafkaConnector is returned by Kafka Connect API"), + @Step(value = "Initialize TestStorage and get test image name.", expected = "TestStorage instance is created and the image name for the test case is retrieved."), + @Step(value = "Create a Plugin with wrong checksum and build Kafka Connect resource with it.", expected = "Kafka Connect resource is created but the build fails due to wrong 
checksum."), + @Step(value = "Deploy Scraper pod with specific configurations.", expected = "Kafka Scraper pod are successfully deployed."), + @Step(value = "Wait for Kafka Connect status to indicate build failure.", expected = "Kafka Connect status contains message about build failure."), + @Step(value = "Deploy network policies for Kafka Connect.", expected = "Network policies are successfully deployed for Kafka Connect."), + @Step(value = "Replace the plugin checksum with the correct one and update Kafka Connect resource.", expected = "Kafka Connect resource is updated with the correct checksum."), + @Step(value = "Wait for Kafka Connect to be ready.", expected = "Kafka Connect becomes ready."), + @Step(value = "Verify that EchoSink KafkaConnector is available in Kafka Connect API.", expected = "EchoSink KafkaConnector is returned by Kafka Connect API."), @Step(value = "Verify that EchoSink KafkaConnector is listed in Kafka Connect resource status", expected = "EchoSink KafkaConnector is listed in the status of Kafka Connect resource") }, labels = { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 11588ed83d9..4058b924918 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -117,7 +117,7 @@ @SuiteDoc( description = @Desc("Verifies the deployment, manual rolling update, and undeployment of Kafka Connect components."), beforeTestSteps = { - @Step(value = "Deploy scraper Pod for accessing all other Pods", expected = "Scraper Pod is deployed") + @Step(value = "Deploy scraper Pod for accessing all other Pods.", expected = "Scraper Pod is deployed.") }, labels = { @Label(value = TestDocsLabels.CONNECT) @@ -131,13 +131,13 @@ class ConnectST extends AbstractST { @TestDoc( description = @Desc("Verifies the deployment, manual rolling update, and undeployment of Kafka Connect components."), steps = { - @Step(value = "Initialize Test Storage", expected = "Test storage instance is created with required context"), - @Step(value = "Define expected configurations", expected = "Configurations are loaded from properties file"), - @Step(value = "Create and wait for resources", expected = "Kafka resources, including NodePools and KafkaConnect instances, are created and become ready"), - @Step(value = "Annotate for manual rolling update", expected = "KafkaConnect components are annotated for a manual rolling update"), - @Step(value = "Perform and wait for rolling update", expected = "KafkaConnect components roll and new pods are deployed"), - @Step(value = "Kafka Connect pod", expected = "Pod configurations and annotations are verified"), - @Step(value = "Kafka Connectors", expected = "Various Kafka Connect resource labels and configurations are verified to ensure correct deployment") + @Step(value = "Initialize Test Storage.", expected = "Test storage instance is created with required context."), + @Step(value = "Define expected configurations.", expected = "Configurations are loaded from properties file."), + @Step(value = "Create and wait for resources.", expected = "Kafka resources, including node pools and KafkaConnect instances, are created and become ready."), + @Step(value = "Annotate for manual rolling update.", expected = "KafkaConnect components are annotated for a manual rolling update."), + @Step(value = "Perform and wait for rolling update.", expected = "KafkaConnect 
+            @Step(value = "Perform and wait for rolling update.", expected = "KafkaConnect components roll and new pods are deployed."),
+            @Step(value = "Verify Kafka Connect pod.", expected = "Pod configurations and annotations are verified."),
+            @Step(value = "Verify Kafka Connectors.", expected = "Various Kafka Connect resource labels and configurations are verified to ensure correct deployment.")
         },
         labels = {
             @Label(value = TestDocsLabels.CONNECT)
@@ -261,8 +261,8 @@ void testKafkaConnectAndConnectorStateWithFileSinkPlugin() {
     @TestDoc(
         description = @Desc("Test verifying Kafka Connect functionalities with Plain and SCRAM-SHA authentication."),
         steps = {
-            @Step(value = "Create object instance of TestStorage", expected = "Instance of TestStorage is created"),
-            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create object instance of TestStorage.", expected = "Instance of TestStorage is created."),
+            @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "Node pools for broker and controller are created or not based on configuration."),
             @Step(value = "Deploy Kafka with SCRAM-SHA-512 listener", expected = "Kafka is deployed with the specified listener authentication"),
             @Step(value = "Create KafkaUser with SCRAM-SHA authentication", expected = "KafkaUser is created using SCRAM-SHA authentication with the given credentials"),
             @Step(value = "Create KafkaTopic", expected = "KafkaTopic is created"),
@@ -354,7 +354,7 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() {
         description = @Desc("Test the functionality of Kafka Connect with a File Sink Plugin in a parallel namespace setup."),
         steps = {
             @Step(value = "Create and configure test storage", expected = "Test storage is set up with necessary configurations."),
-            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "Node pools for broker and controller are created or not based on configuration."),
             @Step(value = "Create and wait for the broker and controller pools", expected = "Broker and controller pools are created and running."),
             @Step(value = "Deploy and configure Kafka Connect with File Sink Plugin", expected = "Kafka Connect with File Sink Plugin is deployed and configured."),
             @Step(value = "Deploy Network Policies for Kafka Connect", expected = "Network Policies are successfully deployed for Kafka Connect."),
@@ -420,7 +420,7 @@ void testKafkaConnectAndConnectorFileSinkPlugin() {
         description = @Desc("Test ensuring the JVM options and resource requests/limits are correctly applied to Kafka Connect components."),
         steps = {
             @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created"),
-            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "Node pools for broker and controller are created or not based on configuration."),
             @Step(value = "Create broker and controller node pools", expected = "Node pools are created and ready"),
             @Step(value = "Create Kafka cluster", expected = "Kafka cluster is created and operational"),
Connect", expected = "Kafka Connect is configured with specified JVM options and resources"), @@ -473,7 +473,7 @@ void testJvmAndResources() { description = @Desc("Test verifying the scaling up and down functionality of Kafka Connect in a Kubernetes environment."), steps = { @Step(value = "Create TestStorage object instance", expected = "Instance of TestStorage is created"), - @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), @Step(value = "Create resources for KafkaNodePools and KafkaCluster", expected = "Resources are created and ready"), @Step(value = "Deploy Kafka Connect with file plugin", expected = "Kafka Connect is deployed with 1 initial replica"), @Step(value = "Verify the initial replica count", expected = "Initial replica count is verified to be 1"), @@ -525,7 +525,7 @@ void testKafkaConnectScaleUpScaleDown() { description = @Desc("This test verifies that Kafka Connect works with TLS and TLS client authentication."), steps = { @Step(value = "Create test storage instance", expected = "Test storage instance is created"), - @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), @Step(value = "Create resources for Kafka broker and Kafka Connect components", expected = "Resources are created and ready"), @Step(value = "Configure Kafka broker with TLS listener and client authentication", expected = "Kafka broker is configured correctly"), @Step(value = "Deploy Kafka user with TLS authentication", expected = "Kafka user is deployed with TLS authentication"), @@ -820,7 +820,7 @@ void testConnectorTaskAutoRestart() { description = @Desc("Test that verifies custom and updated environment variables and readiness/liveness probes for Kafka Connect."), steps = { @Step(value = "Create and configure Kafka Connect with initial values", expected = "Kafka Connect is created and configured with initial environment variables and readiness/liveness probes"), - @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), @Step(value = "Verify initial configuration and environment variables", expected = "Initial configuration and environment variables are as expected"), @Step(value = "Update Kafka Connect configuration and environment variables", expected = "Kafka Connect configuration and environment variables are updated"), @Step(value = "Verify updated configuration and environment variables", expected = "Updated configuration and environment variables are as expected") @@ -1004,7 +1004,7 @@ void testMultiNodeKafkaConnectWithConnectorCreation() { description = @Desc("Test verifying Kafka connect TLS authentication with a username containing unusual characters."), steps = { @Step(value = "Set up a name of username 
             @Step(value = "Set up a name of username containing dots and 64 characters", expected = ""),
-            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "Node pools for broker and controller are created or not based on configuration."),
             @Step(value = "Create Kafka broker, controller, topic, and Kafka user with the specified username", expected = "Resources are created with the expected configurations"),
             @Step(value = "Setup Kafka Connect with the created Kafka instance and TLS authentication", expected = "Kafka Connect is set up with the expected configurations"),
             @Step(value = "Check if the user can produce messages to Kafka", expected = "Messages are produced successfully"),
@@ -1093,8 +1093,8 @@ void testConnectTlsAuthWithWeirdUserName() {
     @TestDoc(
         description = @Desc("Test verifying that Kafka Connect can authenticate with SCRAM-SHA-512 using a username with special characters and length exceeding typical constraints."),
         steps = {
-            @Step(value = "Create resource with Node Pools", expected = "Node Pools created successfully"),
-            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create resource with node pools.", expected = "Node pools are created successfully."),
+            @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "Node pools for broker and controller are created or not based on configuration."),
             @Step(value = "Deploy Kafka cluster with SCRAM-SHA-512 authentication", expected = "Kafka cluster deployed with specified authentications"),
             @Step(value = "Create Kafka Topic", expected = "Topic created successfully"),
             @Step(value = "Create Kafka SCRAM-SHA-512 user with a weird username", expected = "User created successfully with SCRAM-SHA-512 credentials"),
@@ -1303,7 +1303,7 @@ void testScaleConnectWithConnectorToZero() {
         description = @Desc("This test verifies the scaling functionality of Kafka Connect and Kafka Connector subresources."),
         steps = {
             @Step(value = "Initialize the test storage and create broker and controller pools", expected = "Broker and controller pools are created successfully"),
-            @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"),
+            @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "Node pools for broker and controller are created or not based on configuration."),
             @Step(value = "Deploy Kafka, Kafka Connect and Kafka Connector resources", expected = "Kafka, Kafka Connect and Kafka Connector resources are deployed successfully"),
             @Step(value = "Scale Kafka Connect subresource", expected = "Kafka Connect subresource is scaled successfully"),
             @Step(value = "Verify Kafka Connect subresource scaling", expected = "Kafka Connect replicas and observed generation are as expected"),
@@ -1390,7 +1390,7 @@ void testScaleConnectAndConnectorSubresource() {
         steps = {
             @Step(value = "Create Secrets and ConfigMaps", expected = "Secrets and ConfigMaps are created successfully."),
             @Step(value = "Create Kafka environment", expected = "Kafka broker, Kafka Connect, and other resources are deployed successfully."),
the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), @Step(value = "Bind Secrets and ConfigMaps to Kafka Connect", expected = "Secrets and ConfigMaps are bound to Kafka Connect as volumes and environment variables."), @Step(value = "Verify environment variables", expected = "Kafka Connect environment variables contain expected values from Secrets and ConfigMaps."), @Step(value = "Verify mounted volumes", expected = "Kafka Connect mounted volumes contain expected values from Secrets and ConfigMaps.") @@ -1612,7 +1612,7 @@ void testMountingSecretAndConfigMapAsVolumesAndEnvVars() { @TestDoc( description = @Desc("Verifies Kafka Connect functionality when SCRAM-SHA authentication password is changed and the component is rolled."), steps = { - @Step(value = "Create NodePools using resourceManager based on the configuration", expected = "NodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), @Step(value = "Create Kafka cluster with SCRAM-SHA authentication", expected = "Kafka cluster is created with SCRAM-SHA authentication enabled"), @Step(value = "Create a Kafka user with SCRAM-SHA authentication", expected = "Kafka user with SCRAM-SHA authentication is created"), @Step(value = "Deploy Kafka Connect with the created user credentials", expected = "Kafka Connect is deployed successfully"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index 109c7590557..15bc7f27615 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -45,9 +45,9 @@ @Tag(REGRESSION) @SuiteDoc( - description = @Desc("Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration."), + description = @Desc("This test verifies Kafka Connect using ConfigMap and EnvVar configuration."), beforeTestSteps = { - @Step(value = "Deploy cluster operator across all namespaces, with custom configuration", expected = "Cluster operator is deployed") + @Step(value = "Deploy cluster operator across all namespaces, with custom configuration.", expected = "Cluster operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -59,16 +59,16 @@ public class ConfigProviderST extends AbstractST { @ParallelNamespaceTest @TestDoc( - description = @Desc("Test ensuring Kafka Connect works properly using ConfigMap and EnvVar configuration."), + description = @Desc("Tests to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration."), steps = { - @Step(value = "Create broker and controller pools", expected = "Resources are created and are in ready state"), - @Step(value = "Create Kafka cluster", expected = "Kafka cluster is ready with 3 brokers"), - @Step(value = "Create ConfigMap for connector configuration", expected = "ConfigMap with connector configuration is created"), - @Step(value = "Deploy Kafka Connect with external configuration", expected = "Kafka Connect is deployed with proper configuration"), - @Step(value = "Create 
necessary Role and RoleBinding for connector", expected = "Role and RoleBinding are created and applied"), - @Step(value = "Deploy Kafka connector", expected = "Kafka connector is successfully deployed"), - @Step(value = "Deploy Kafka clients", expected = "Kafka clients are deployed and ready"), - @Step(value = "Send messages and verify they are written to file sink", expected = "Messages are successfully written to the specified file sink") + @Step(value = "Create broker and controller pools.", expected = "Resources are created and are in ready state."), + @Step(value = "Create Kafka cluster.", expected = "Kafka cluster is ready with 3 brokers."), + @Step(value = "Create ConfigMap for connector configuration.", expected = "ConfigMap with connector configuration is created."), + @Step(value = "Deploy Kafka Connect with external configuration.", expected = "Kafka Connect is deployed with proper configuration."), + @Step(value = "Create necessary Role and RoleBinding for connector.", expected = "Role and RoleBinding are created and applied."), + @Step(value = "Deploy Kafka connector.", expected = "Kafka connector is successfully deployed."), + @Step(value = "Deploy Kafka clients.", expected = "Kafka clients are deployed and ready."), + @Step(value = "Send messages and verify they are written to sink file.", expected = "Messages are successfully written to the specified sink file.") }, labels = { @Label(value = TestDocsLabels.KAFKA) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 45678709b91..bd0985e1ebc 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -56,10 +56,10 @@ @Tag(REGRESSION) @SuiteDoc( - description = @Desc("This test suite verifies various functionalities of Kafka Node Pools in a Kafka cluster."), + description = @Desc("This test suite verifies various functionalities of Kafka node pools in a Kafka cluster."), beforeTestSteps = { - @Step(value = "Ensure the environment is not using OLM or Helm and Kafka Node Pools are enabled", expected = "Environment is validated"), - @Step(value = "Install the default cluster operator", expected = "Cluster operator is installed") + @Step(value = "Ensure the environment is not using OLM or Helm and Kafka node pools are enabled.", expected = "Environment is validated."), + @Step(value = "Install the default Cluster Operator.", expected = "Cluster operator is installed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -70,12 +70,12 @@ public class KafkaNodePoolST extends AbstractST { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies the management of broker IDs in Kafka Node Pools using annotations."), + description = @Desc("This test case verifies the management of broker IDs in Kafka node pools using annotations."), steps = { - @Step(value = "Deploy a Kafka instance with annotations to manage Node Pools and Initial NodePool (Initial) to hold Topics and act as controller.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91."), - @Step(value = "Deploy additional 2 NodePools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. 
values ([4],[6]).", expected = "NodePools are deployed, NodePool A contains ID 4, NodePool B contains IDs 6, 0."), - @Step(value = "Annotate NodePool A 'next-node-ids' and NodePool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp.", expected = "NodePools are scaled, NodePool A contains IDs 4, 20, 21, 1. NodePool B contains ID 0."), - @Step(value = "Annotate NodePool A 'remove-node-ids' and NodePool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp.", expected = "NodePools are scaled, NodePool A contains IDs 1, 4. NodePool B contains IDs 2, 3, 5.") + @Step(value = "Deploy a Kafka instance with annotations to manage node pools and one initial node pool to hold topics and act as controller.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91."), + @Step(value = "Deploy additional 2 node pools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]).", expected = "node pools are deployed, node pool A contains ID 4, node pool B contains IDs 6, 0."), + @Step(value = "Annotate node pool A 'next-node-ids' and node pool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp.", expected = "node pools are scaled, node pool A contains IDs 4, 20, 21, 1. node pool B contains ID 0."), + @Step(value = "Annotate node pool A 'remove-node-ids' and node pool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp.", expected = "node pools are scaled, node pool A contains IDs 1, 4. node pool B contains IDs 2, 3, 5.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -108,7 +108,7 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), nodePoolNameInitial), 2); - LOGGER.info("Testing deployment of NodePools with pre-configured annotation: {} is creating Brokers with correct IDs", Annotations.ANNO_STRIMZI_IO_NODE_POOLS); + LOGGER.info("Testing deployment of node pools with pre-configured annotation: {} is creating Brokers with correct IDs", Annotations.ANNO_STRIMZI_IO_NODE_POOLS); // Deploy NodePool A with only 1 replica and next ID 4, and NodePool B with 2 replica and next ID 6 resourceManager.createResourceWithWait(KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), nodePoolNameA, testStorage.getClusterName(), 1) @@ -174,11 +174,11 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies changing of roles in Kafka Node Pools."), + description = @Desc("This test case verifies changing of roles in Kafka node pools."), steps = { - @Step(value = "Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial Node Pools is deployed."), + @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial node pools is deployed."), @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic is created."), - @Step(value = "Annotate one of Node Pools to perform manual 
Rolling Update.", expected = "Rolling Update started."), + @Step(value = "Annotate one of node pools to perform manual Rolling Update.", expected = "Rolling Update started."), @Step(value = "Change role of Kafka Node Pool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update."), @Step(value = "Original Rolling Update finishes successfully.", expected = "Rolling Update is completed."), @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and Node Pool role change is initiated."), @@ -255,13 +255,13 @@ void testNodePoolsRolesChanging() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies the possibility of adding and removing Kafka Node Pools into an existing Kafka cluster."), + description = @Desc("This test case verifies the possibility of adding and removing Kafka node pools into an existing Kafka cluster."), steps = { - @Step(value = "Deploy a Kafka instance with annotations to manage Node Pools and Initial 2 NodePools.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource."), + @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource."), @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."), @Step(value = "Add extra KafkaNodePool with broker role to the Kafka.", expected = "KafkaNodePool is deployed and ready."), @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."), - @Step(value = "Remove one of kafkaNodePool with broker role.", expected = "KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready."), + @Step(value = "Remove one kafkaNodePool with broker role.", expected = "KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready."), @Step(value = "Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected.") }, labels = { @@ -317,14 +317,14 @@ void testNodePoolsAdditionAndRemoval() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies transfer of Kafka Cluster from and to management by KafkaNodePool, by creating corresponding Kafka and KafkaNodePool custom resources and manipulating according Kafka annotation."), + description = @Desc("This test verifies Kafka Cluster migration to and from node pools, using the necessary Kafka and KafkaNodePool resources and annotations."), steps = { - @Step(value = "Deploy Kafka with annotated to enable management by KafkaNodePool, and KafkaNodePool targeting given Kafka Cluster.", expected = "Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected."), + @Step(value = "Deploy a Kafka cluster with the annotation to enable node pool management, and configure a KafkaNodePool resource to target the 
Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."), @Step(value = "Modify KafkaNodePool by increasing number of Kafka Replicas.", expected = "Number of Kafka Pods is increased to match specification from KafkaNodePool."), @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), - @Step(value = "Modify Kafka custom resource annotation strimzi.io/node-pool to disable management by KafkaNodePool.", expected = "StrimziPodSet is modified, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) are ignored."), + @Step(value = "Disable KafkaNodePool management in the Kafka custom resource using the node pool annotation.", expected = "StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored."), @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), - @Step(value = "Modify Kafka custom resource annotation strimzi.io/node-pool to enable management by KafkaNodePool.", expected = "New StrimziPodSet is created, replacing former one, Pods are replaced and specification from KafkaNodePool (i.e., changed replica count) has priority over Kafka specification."), + @Step(value = "Enable node pool management in the Kafka custom resource using the node pool annotation.", expected = "New StrimziPodSet is created, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications."), @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages.") }, labels = { @@ -380,7 +380,7 @@ void testKafkaManagementTransferToAndFromKafkaNodePool() { LOGGER.info("Disable KafkaNodePool in Kafka Cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { kafka.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "disabled"); - // because Kafka CR with NodePools is missing .spec.kafka.replicas and .spec.kafka.storage, we need to + // because Kafka CR with node pools is missing .spec.kafka.replicas and .spec.kafka.storage, we need to // set those here kafka.getSpec().getKafka().setReplicas(originalKafkaReplicaCount); kafka.getSpec().getKafka().setStorage(new PersistentClaimStorageBuilder() diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index 0fd7832fbe7..b2a8b52520b 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -113,9 +113,9 @@ @Tag(REGRESSION) @SuppressWarnings("checkstyle:ClassFanOutComplexity") @SuiteDoc( - description = @Desc("Suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters."), + description = @Desc("Test suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters."), beforeTestSteps = { - @Step(value = "Deploy cluster operator across all namespaces, with custom configuration", expected = "Cluster operator is deployed") + 
@Step(value = "Deploy Cluster Operator across all namespaces, with custom configuration.", expected = "Cluster Operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -130,9 +130,9 @@ class KafkaST extends AbstractST { @TestDoc( description = @Desc("This test case verifies that Pod's resources (limits and requests), custom JVM configurations, and expected Java configuration are propagated correctly to Pods, containers, and processes."), steps = { - @Step(value = "Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration", expected = "Kafka and its components (ZooKeeper, Entity Operator) are deployed"), - @Step(value = "For each of components (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables", expected = "Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification."), - @Step(value = "Wait for a time to observe that none of initiated components needed Rolling Update", expected = "All of Kafka components remained in stable state.") + @Step(value = "Deploy Kafka and its components with custom specifications, including specifying resources and JVM configuration.", expected = "Kafka and its components (ZooKeeper, Entity Operator) are deployed."), + @Step(value = "For each component (Kafka, ZooKeeper, Topic Operator, User Operator), verify specified configuration of JVM, resources, and also environment variables.", expected = "Each of the components has requests and limits assigned correctly, JVM, and environment variables configured according to the specification."), + @Step(value = "Wait for a time to observe that no initiated components need rolling update.", expected = "All Kafka components remain in stable state.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -310,13 +310,13 @@ void testJvmAndResources() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies the correct deployment of Entity Operator, i.e., including both User Operator and Topic Operator. Entity Operator is firstly modified to exclude User Operator, afterwards it is modified to default configuration, which includes User Operator. The next step is removal of Topic Operator itself and finally, also removing User Operator, with Topic Operator being already removed."), + description = @Desc("This test case verifies the correct deployment of the Entity Operator, including both the User Operator and Topic Operator. First, the Entity Operator is modified to exclude the User Operator. Then, it's restored to its default configuration, which includes the User Operator. 
Next, the Topic Operator is removed, followed by the User Operator, with the Topic Operator already excluded."), steps = { - @Step(value = "Deploy Kafka with Entity Operator set.", expected = "Kafka is deployed, and Entity Operator consist of both Topic and User Operators"), - @Step(value = "Remove User Operator from the Kafka specification", expected = "User Operator container is deleted"), - @Step(value = "Set User Operator back in the Kafka specification", expected = "User Operator container is recreated"), - @Step(value = "Remove Topic Operator from the Kafka specification", expected = "Topic Operator container is removed from Entity Operator"), - @Step(value = "Remove User Operator from the Kafka specification", expected = "Entity Operator Pod is removed, as there are no other containers present.") + @Step(value = "Deploy Kafka with Entity Operator set.", expected = "Kafka is deployed, and Entity Operator consists of both Topic Operator and User Operator."), + @Step(value = "Remove User Operator from the Kafka specification.", expected = "User Operator container is deleted."), + @Step(value = "Set User Operator back in the Kafka specification.", expected = "User Operator container is recreated."), + @Step(value = "Remove Topic Operator from the Kafka specification.", expected = "Topic Operator container is removed from Entity Operator."), + @Step(value = "Remove User Operator from the Kafka specification.", expected = "Entity Operator Pod is removed, as there are no other containers present.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -395,13 +395,13 @@ void testRemoveComponentsFromEntityOperator() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies that Kafka with persistent storage, and JBOD storage, property 'delete claim' of JBOD storage."), + description = @Desc("This test case verifies Kafka running with persistent JBOD storage, and configured with the `deleteClaim` storage property."), steps = { - @Step(value = "Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of these are configured to delete their Persistent Volume Claims on Kafka cluster un-provision.", expected = "Kafka is deployed, volumes are labeled and linked to Pods correctly."), - @Step(value = "Verify that labels in Persistent Volume Claims are set correctly.", expected = "Persistent Volume Claims do contain expected labels and values."), - @Step(value = "Modify Kafka Custom Resource, specifically 'delete claim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator."), - @Step(value = "Delete Kafka cluster.", expected = "Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'delete claim' property set to true."), - @Step(value = "Verify remaining Persistent Volume Claims.", expected = "Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'delete claim' set to true is still present.") + @Step(value = "Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of which are configured to delete their Persistent Volume Claims on Kafka cluster un-provision.", expected = "Kafka is deployed, volumes are labeled and linked to Pods correctly."), + @Step(value = "Verify that labels in Persistent Volume Claims are set correctly.", expected = "Persistent Volume Claims contain expected labels and values."), + @Step(value = "Modify Kafka 
Custom Resource, specifically 'deleteClaim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator."), + @Step(value = "Delete Kafka cluster.", expected = "Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'deleteClaim' property set to true."), + @Step(value = "Verify remaining Persistent Volume Claims.", expected = "Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'deleteClaim' set to true is still present.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -488,10 +488,10 @@ void testKafkaJBODDeleteClaimsTrueFalse() { @TestDoc( description = @Desc("Test regenerates certificates after changing Kafka's external address."), steps = { - @Step(value = "Create Kafka without external listener", expected = "Kafka instance is created without an external listener"), - @Step(value = "Edit Kafka to include an external listener", expected = "External listener is correctly added to the Kafka instance"), - @Step(value = "Wait until the Kafka component has rolled", expected = "Kafka component rolls successfully with the new external listener"), - @Step(value = "Compare Kafka broker secrets before and after adding external listener", expected = "Secrets are different before and after adding the external listener") + @Step(value = "Create Kafka without external listener.", expected = "Kafka instance is created without an external listener."), + @Step(value = "Edit Kafka to include an external listener.", expected = "External listener is correctly added to the Kafka instance."), + @Step(value = "Wait until the Kafka component has rolled.", expected = "Kafka component rolls successfully with the new external listener."), + @Step(value = "Compare Kafka broker secrets before and after adding external listener.", expected = "Secrets are different before and after adding the external listener.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -550,12 +550,12 @@ void testRegenerateCertExternalAddressChange() { @ParallelNamespaceTest @SuppressWarnings({"checkstyle:JavaNCSS", "checkstyle:NPathComplexity", "checkstyle:MethodLength", "checkstyle:CyclomaticComplexity"}) @TestDoc( - description = @Desc("This test case verifies the presence of expected Strimzi specific labels, also labels and annotations specified by user. Some of user-specified labels are later modified (new one is added, one is modified) which triggers rolling update after which all changes took place as expected."), + description = @Desc("This test case verifies the presence of expected Strimzi specific labels, also labels and annotations specified by user. 
Some user-specified labels are later modified (new one is added, one is modified) which triggers rolling update after which all changes took place as expected."), steps = { - @Step(value = "Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotation in PVC metadata", expected = "Kafka is deployed with its default labels and all others specified by user."), - @Step(value = "Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected", expected = "Producer and Consumer are able to produce and consume messages respectively."), - @Step(value = "Modify configuration of Kafka CR with addition of new labels and modification of existing", expected = "Kafka is rolling and new labels are present in Kafka CR, and managed resources"), - @Step(value = "Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected", expected = "Producer and Consumer are able to produce and consume messages respectively.") + @Step(value = "Deploy Kafka with persistent storage and specify custom labels in CR metadata, and also other labels and annotation in PVC metadata.", expected = "Kafka is deployed with its default labels and all others specified by user."), + @Step(value = "Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected.", expected = "Producer and Consumer are able to produce and consume messages respectively."), + @Step(value = "Modify configuration of Kafka CR with addition of new labels and modification of existing.", expected = "Kafka is rolling and new labels are present in Kafka CR, and managed resources."), + @Step(value = "Deploy Producer and Consumer configured to produce and consume default number of messages, to make sure Kafka works as expected.", expected = "Producer and Consumer are able to produce and consume messages respectively.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -566,16 +566,16 @@ void testLabelsExistenceAndManipulation() { // label key and values to be used as part of kafka CR final String firstKafkaLabelKey = "first-kafka-label-key"; final String firstKafkaLabelValue = "first-kafka-label-value"; final String secondKafkaLabelKey = "second-kafka-label-key"; final String secondKafkaLabelValue = "second-kafka-label-value"; final Map customSpecifiedLabels = new HashMap<>(); customSpecifiedLabels.put(firstKafkaLabelKey, firstKafkaLabelValue); customSpecifiedLabels.put(secondKafkaLabelKey, secondKafkaLabelValue); // label key and value used in addition for while creating kafka CR (as part of PVCs label and annotation) final String pvcLabelOrAnnotationKey = "pvc-label-annotation-key"; final String pvcLabelOrAnnotationValue = "pvc-label-annotation-value"; final Map customSpecifiedLabelOrAnnotationPvc = new HashMap<>(); customSpecifiedLabelOrAnnotationPvc.put(pvcLabelOrAnnotationKey, pvcLabelOrAnnotationValue); @@ -761,7 +761,7 @@ void testLabelsExistenceAndManipulation() { // key-value pairs modification and addition of user specified labels for kafka CR metadata final String firstKafkaLabelValueModified = "first-kafka-label-value-modified"; final String thirdKafkaLabelKey = "third-kafka-label-key";
final String thirdKafkaLabelValue = "third-kafka-label-value"; customSpecifiedLabels.replace(firstKafkaLabelKey, firstKafkaLabelValueModified); customSpecifiedLabels.put(thirdKafkaLabelKey, thirdKafkaLabelValue); LOGGER.info("New values of labels which are to modify Kafka CR after their replacement and addition of new one are following {}", customSpecifiedLabels); @@ -1059,7 +1059,7 @@ void testReadOnlyRootFileSystem() { @TestDoc( description = @Desc("Test to ensure that deploying Kafka with an unsupported version results in the expected error."), steps = { - @Step(value = "Initialize test storage with current context", expected = "Test storage is initialized"), + @Step(value = "Initialize test storage with current context.", expected = "Test storage is initialized."), @Step(value = "Create Kafka node pools", expected = "Kafka node pools are created and ready"), @Step(value = "Deploy Kafka with a non-existing version", expected = "Kafka deployment with non-supported version begins"), @Step(value = "Log Kafka deployment process", expected = "Log entry for Kafka deployment is created"), @@ -1225,7 +1225,7 @@ void testResizeJbodVolumes() { @TestDoc( description = @Desc("This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft."), steps = { - @Step(value = "Deploy Kafka annotated to enable KRaft (and additionally annotated to enable management by KafkaNodePool due to default usage of NodePools), and KafkaNodePool targeting given Kafka Cluster.", expected = "Kafka is deployed, KafkaNodePool custom resource is targeting Kafka Cluster as expected."), + @Step(value = "Deploy Kafka annotated to enable KRaft (and additionally annotated to enable node pool management), and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."), @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), @Step(value = "Trigger manual Rolling Update.", expected = "Rolling update is triggered and completed shortly after.") }, diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java index a9f9428664d..18dd9ec8763 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java @@ -37,9 +37,9 @@ @Tag(KAFKA_SMOKE) @SuiteDoc( - description = @Desc("Test checking basic functionality for each supported Kafka version. 
Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly."), + description = @Desc("Verifies the basic functionality for each supported Kafka version."), beforeTestSteps = { - @Step(value = "Deploy cluster operator with default installation", expected = "Cluster operator is deployed") + @Step(value = "Deploy Cluster Operator with default installation.", expected = "Cluster Operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -52,13 +52,13 @@ public class KafkaVersionsST extends AbstractST { @ParameterizedTest(name = "Kafka version: {0}.version()") @MethodSource("io.strimzi.systemtest.utils.TestKafkaVersion#getSupportedKafkaVersions") @TestDoc( - description = @Desc("Test checking basic functionality for each supported Kafka version. Ensures that Kafka functionality such as deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners are working correctly."), + description = @Desc("Tests the basic functionality for each supported Kafka version, ensuring that deployment, Topic Operator, User Operator, and message transmission via PLAIN and TLS listeners work correctly."), steps = { - @Step(value = "Deploy Kafka cluster with specified version", expected = "Kafka cluster is deployed without any issue"), - @Step(value = "Verify the Topic Operator creation", expected = "Topic Operator is working correctly"), - @Step(value = "Verify the User Operator creation", expected = "User Operator is working correctly with SCRAM-SHA and ACLs"), - @Step(value = "Send and receive messages via PLAIN with SCRAM-SHA", expected = "Messages are sent and received successfully"), - @Step(value = "Send and receive messages via TLS", expected = "Messages are sent and received successfully") + @Step(value = "Deploy Kafka cluster with specified version.", expected = "Kafka cluster is deployed without any issue."), + @Step(value = "Verify the Topic Operator creation.", expected = "Topic Operator is working correctly."), + @Step(value = "Verify the User Operator creation.", expected = "User Operator is working correctly with SCRAM-SHA and ACLs."), + @Step(value = "Send and receive messages via PLAIN with SCRAM-SHA.", expected = "Messages are sent and received successfully."), + @Step(value = "Send and receive messages via TLS.", expected = "Messages are sent and received successfully.") }, labels = { @Label(value = TestDocsLabels.KAFKA) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java index e6d2679fab6..11027e0a01a 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java @@ -44,9 +44,9 @@ import static org.junit.jupiter.api.Assumptions.assumeFalse; @SuiteDoc( - description = @Desc("NOTE: STs in this class will not properly work on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. 
To properly run this suite, you should use cluster with proper storage."), + description = @Desc("NOTE: STs in this class will not work properly on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. To properly run this suite, you should use a cluster with proper storage."), beforeTestSteps = { - @Step(value = "Deploy default cluster operator with the required configurations", expected = "Cluster operator is deployed") + @Step(value = "Deploy default cluster operator with the required configurations.", expected = "Cluster operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -59,12 +59,12 @@ public class QuotasST extends AbstractST { @TestDoc( description = @Desc("Test to check Kafka Quotas Plugin for disk space."), steps = { - @Step(value = "Assume the cluster is not Minikube or MicroShift", expected = "Cluster is appropriate for the test"), - @Step(value = "Create necessary resources for Kafka and nodes", expected = "Resources are created and Kafka is set up with quotas plugin"), - @Step(value = "Send messages without any user; observe quota enforcement", expected = "Producer stops after reaching the minimum available bytes"), - @Step(value = "Check Kafka logs for quota enforcement message", expected = "Kafka logs contain the expected quota enforcement message"), - @Step(value = "Send messages with excluded user and observe the behavior", expected = "Messages are sent successfully without hitting the quota"), - @Step(value = "Clean up resources", expected = "Resources are deleted successfully") + @Step(value = "Assume the cluster is not Minikube or MicroShift.", expected = "Cluster is appropriate for the test."), + @Step(value = "Create necessary resources for Kafka and nodes.", expected = "Resources are created and Kafka is set up with quotas plugin."), + @Step(value = "Send messages without any user; observe quota enforcement.", expected = "Producer stops after reaching the minimum available bytes."), + @Step(value = "Check Kafka logs for quota enforcement message.", expected = "Kafka logs contain the expected quota enforcement message."), + @Step(value = "Send messages with excluded user and observe the behavior.", expected = "Messages are sent successfully without hitting the quota."), + @Step(value = "Clean up resources.", expected = "Resources are deleted successfully.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -151,12 +151,12 @@ void testKafkaQuotasPluginIntegration() { @TestDoc( description = @Desc("Test verifying bandwidth limitations with Kafka quotas plugin."), steps = { - @Step(value = "Set excluded principal", expected = "Principal is set"), - @Step(value = "Create Kafka resources including node pools and persistent Kafka with quotas enabled", expected = "Kafka resources are created successfully with quotas setup"), - @Step(value = "Create Kafka topic and user with SCRAM-SHA authentication", expected = "Kafka topic and SCRAM-SHA user are created successfully"), - @Step(value = "Send messages with normal user", expected = "Messages are sent and duration is measured"), - @Step(value = "Send messages with excluded user", expected = "Messages are sent and duration is measured"), - @Step(value = "Assert that time taken for normal user is greater than for excluded user", expected = "Assertion is successful") + @Step(value = "Set excluded principal.", expected = "Principal is set."), 
@Step(value = "Create Kafka resources including node pools and persistent Kafka with quotas enabled.", expected = "Kafka resources are created successfully with quotas setup."), + @Step(value = "Create Kafka topic and user with SCRAM-SHA authentication.", expected = "Kafka topic and SCRAM-SHA user are created successfully."), + @Step(value = "Send messages with normal user.", expected = "Messages are sent and duration is measured."), + @Step(value = "Send messages with excluded user.", expected = "Messages are sent and duration is measured."), + @Step(value = "Assert that time taken for normal user is greater than for excluded user.", expected = "Assertion is successful.") }, labels = { @Label(value = TestDocsLabels.KAFKA) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java index a99900e4797..66fa28061ea 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java @@ -52,11 +52,11 @@ @SuiteDoc( description = @Desc("This test suite covers scenarios for Tiered Storage integration implemented within Strimzi."), beforeTestSteps = { - @Step(value = "Create test namespace", expected = "Namespace is created"), - @Step(value = "Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build)", expected = "Kafka image is built"), - @Step(value = "Deploy Minio in test namespace and init the client inside the Minio pod", expected = "Minio is deployed and client is initialized"), - @Step(value = "Init bucket in Minio for purposes of these tests", expected = "Bucket is initialized in Minio"), - @Step(value = "Deploy Strimzi Cluster Operator", expected = "Strimzi Cluster Operator is deployed") + @Step(value = "Create test namespace.", expected = "Namespace is created."), + @Step(value = "Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build).", expected = "Kafka image is built."), + @Step(value = "Deploy Minio in test namespace and init the client inside the Minio pod.", expected = "Minio is deployed and client is initialized."), + @Step(value = "Init bucket in Minio for purposes of these tests.", expected = "Bucket is initialized in Minio."), + @Step(value = "Deploy Cluster Operator.", expected = "Cluster Operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -73,13 +73,13 @@ public class TieredStorageST extends AbstractST { @ParallelTest @TestDoc( - description = @Desc("This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. The tests use Aiven Tiered Storage plugin - ..."), + description = @Desc("This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. 
The tests use Aiven Tiered Storage plugin (tiered-storage-for-apache-kafka)."), steps = { - @Step(value = "Deploys KafkaNodePool resource with Broker NodePool with PV of size 10Gi", expected = "KafkaNodePool resource is deployed successfully with specified configuration"), - @Step(value = "Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll", expected = "Kafka resource is deployed successfully with Tiered Storage configuration"), - @Step(value = "Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed for speedup the sync)", expected = "Topic is created successfully with Tiered Storage enabled and segment size of 10mb"), - @Step(value = "Starts continuous producer to send data to Kafka", expected = "Continuous producer starts sending data to Kafka"), - @Step(value = "Wait until Minio size is not empty (contains data from Kafka)", expected = "Minio contains data from Kafka") + @Step(value = "Deploys KafkaNodePool resource with PV of size 10Gi.", expected = "KafkaNodePool resource is deployed successfully with specified configuration."), + @Step(value = "Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll.", expected = "Kafka resource is deployed successfully with Tiered Storage configuration."), + @Step(value = "Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed to speed up the sync).", expected = "Topic is created successfully with Tiered Storage enabled and segment size of 10mb."), + @Step(value = "Starts continuous producer to send data to Kafka.", expected = "Continuous producer starts sending data to Kafka."), + @Step(value = "Wait until Minio size is not empty (contains data from Kafka).", expected = "Minio contains data from Kafka.") }, labels = { @Label(value = TestDocsLabels.KAFKA) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java index de311b30192..7053e7e105d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java @@ -65,7 +65,7 @@ @SuiteDoc( description = @Desc("DynamicConfST is responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update."), beforeTestSteps = { - @Step(value = "Deploy the Cluster Operator", expected = "Cluster Operator is installed successfully") + @Step(value = "Deploy the Cluster Operator.", expected = "Cluster Operator is installed successfully.") } ) public class DynamicConfST extends AbstractST { @@ -79,13 +79,13 @@ public class DynamicConfST extends AbstractST { @TestDoc( description = @Desc("Test for verifying dynamic configuration changes in a Kafka cluster with multiple clusters in one namespace."), steps = { - @Step(value = "Deep copy shard Kafka configuration", expected = "Configuration map is duplicated with deep copy"), - @Step(value = "Create resources with wait", expected = "Resources are created and ready"), - @Step(value = "Create scraper pod", expected = "Scraper pod is created"), - @Step(value = "Retrieve and verify Kafka configurations from ConfigMaps", expected = "Configurations meet expected values"), - @Step(value = "Retrieve Kafka broker configuration via 
CLI", expected = "Dynamic configurations are retrieved"), - @Step(value = "Update Kafka configuration for unclean leader election", expected = "Configuration is updated and verified for dynamic property"), - @Step(value = "Verify updated Kafka configurations", expected = "Updated configurations are persistent and correct") + @Step(value = "Deep copy shared Kafka configuration.", expected = "Configuration map is duplicated with deep copy."), + @Step(value = "Create resources with wait.", expected = "Resources are created and ready."), + @Step(value = "Create scraper pod.", expected = "Scraper pod is created."), + @Step(value = "Retrieve and verify Kafka configurations from ConfigMaps.", expected = "Configurations meet expected values."), + @Step(value = "Retrieve Kafka broker configuration via CLI.", expected = "Dynamic configurations are retrieved."), + @Step(value = "Update Kafka configuration for unclean leader election.", expected = "Configuration is updated and verified for dynamic property."), + @Step(value = "Verify updated Kafka configurations.", expected = "Updated configurations are persistent and correct.") }, labels = { @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), @@ -95,7 +95,7 @@ public class DynamicConfST extends AbstractST { void testSimpleDynamicConfiguration() { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); - Map deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream() + Map deepCopyOfSharedKafkaConfig = kafkaConfig.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); resourceManager.createResourceWithWait( @@ -107,7 +107,7 @@ void testSimpleDynamicConfiguration() { resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), testStorage.getClusterName(), KAFKA_REPLICAS, 1) .editSpec() .editKafka() - .withConfig(deepCopyOfShardKafkaConfig) + .withConfig(deepCopyOfSharedKafkaConfig) .endKafka() .endSpec() .build(), @@ -128,9 +128,9 @@ void testSimpleDynamicConfiguration() { String kafkaConfigurationFromPod = KafkaCmdClient.describeKafkaBrokerUsingPodCli(Environment.TEST_SUITE_NAMESPACE, scraperPodName, KafkaResources.plainBootstrapAddress(testStorage.getClusterName()), podNum); assertThat(kafkaConfigurationFromPod, containsString("Dynamic configs for broker 0 are:\n")); - deepCopyOfShardKafkaConfig.put("unclean.leader.election.enable", true); + deepCopyOfSharedKafkaConfig.put("unclean.leader.election.enable", true); - updateAndVerifyDynConf(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), deepCopyOfShardKafkaConfig); + updateAndVerifyDynConf(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), deepCopyOfSharedKafkaConfig); // Wait until the configuration is properly set and returned by Kafka Admin API StUtils.waitUntilSupplierIsSatisfied("unclean.leader.election.enable=true is available in Broker config", () -> @@ -315,15 +315,15 @@ void testUpdateToExternalListenerCausesRollingRestart() { @TestDoc( description = @Desc("Test validating that updating Kafka cluster listeners to use external clients causes a rolling restart."), steps = { - @Step(value = "Setup initial Kafka cluster and resources", expected = "Kafka cluster and resources are successfully created"), - @Step(value = "Create external Kafka clients and verify message production/consumption on plain listener", expected = "Messages are successfully produced and consumed using plain listener"), - @Step(value = "Attempt to produce/consume messages using TLS listener 
before update", expected = "Exception is thrown because the listener is plain"), - @Step(value = "Update Kafka cluster to use external TLS listener", expected = "Kafka cluster is updated and rolling restart occurs"), - @Step(value = "Verify message production/consumption using TLS listener after update", expected = "Messages are successfully produced and consumed using TLS listener"), - @Step(value = "Attempt to produce/consume messages using plain listener after TLS update", expected = "Exception is thrown because the listener is TLS"), - @Step(value = "Revert Kafka cluster listener to plain", expected = "Kafka cluster listener is reverted and rolling restart occurs"), - @Step(value = "Verify message production/consumption on plain listener after reverting", expected = "Messages are successfully produced and consumed using plain listener"), - @Step(value = "Attempt to produce/consume messages using TLS listener after reverting", expected = "Exception is thrown because the listener is plain") + @Step(value = "Setup initial Kafka cluster and resources.", expected = "Kafka cluster and resources are successfully created."), + @Step(value = "Create external Kafka clients and verify message production/consumption on plain listener.", expected = "Messages are successfully produced and consumed using plain listener."), + @Step(value = "Attempt to produce/consume messages using TLS listener before update.", expected = "Exception is thrown because the listener is plain."), + @Step(value = "Update Kafka cluster to use external TLS listener.", expected = "Kafka cluster is updated and rolling restart occurs."), + @Step(value = "Verify message production/consumption using TLS listener after update.", expected = "Messages are successfully produced and consumed using TLS listener."), + @Step(value = "Attempt to produce/consume messages using plain listener after TLS update.", expected = "Exception is thrown because the listener is TLS."), + @Step(value = "Revert Kafka cluster listener to plain.", expected = "Kafka cluster listener is reverted and rolling restart occurs."), + @Step(value = "Verify message production/consumption on plain listener after reverting.", expected = "Messages are successfully produced and consumed using plain listener."), + @Step(value = "Attempt to produce/consume messages using TLS listener after reverting.", expected = "Exception is thrown because the listener is plain.") }, labels = { @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java index 95bdb970c7f..5b88f2da459 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java @@ -50,9 +50,9 @@ @SuiteDoc( description = @Desc("DynamicConfigurationSharedST is responsible for verifying that changing dynamic Kafka configuration will not trigger a rolling update. 
Shared -> for each test case we use the same Kafka resource configuration."), beforeTestSteps = { - @Step(value = "Run cluster operator installation", expected = "Cluster operator is installed"), - @Step(value = "Deploy shared Kafka across all test cases", expected = "Shared Kafka is deployed"), - @Step(value = "Deploy scraper pod", expected = "Scraper pod is deployed") + @Step(value = "Run Cluster Operator installation.", expected = "Cluster Operator is installed."), + @Step(value = "Deploy shared Kafka across all test cases.", expected = "Shared Kafka is deployed."), + @Step(value = "Deploy scraper pod.", expected = "Scraper pod is deployed.") }, labels = { @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index 29b9e74ed8a..1a4816e8195 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -100,7 +100,7 @@ @SuiteDoc( description = @Desc("This class demonstrates various tests for Kafka listeners using different authentication mechanisms."), beforeTestSteps = { - @Step(value = "Install the cluster operator with default settings", expected = "Cluster operator is installed successfully") + @Step(value = "Install the cluster operator with default settings.", expected = "Cluster operator is installed successfully.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -141,10 +141,10 @@ public class ListenersST extends AbstractST { @TestDoc( description = @Desc("Test sending messages over plain transport, without auth"), steps = { - @Step(value = "Create Kafka resources with wait", expected = "Kafka broker, controller, and topic are created"), - @Step(value = "Log transmission message", expected = "Transmission message is logged"), - @Step(value = "Produce and consume messages with plain clients", expected = "Messages are successfully produced and consumed"), - @Step(value = "Validate Kafka service discovery annotation", expected = "The discovery annotation is validated successfully") + @Step(value = "Create Kafka resources with wait.", expected = "Kafka broker, controller, and topic are created."), + @Step(value = "Log transmission message.", expected = "Transmission message is logged."), + @Step(value = "Produce and consume messages with plain clients.", expected = "Messages are successfully produced and consumed."), + @Step(value = "Validate Kafka service discovery annotation.", expected = "The discovery annotation is validated successfully.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -180,12 +180,12 @@ void testSendMessagesPlainAnonymous() { @TestDoc( description = @Desc("Test sending messages over tls transport using mutual tls auth."), steps = { - @Step(value = "Create Kafka node pool resources", expected = "Persistent storage node pools are created"), - @Step(value = "Disable plain listener and enable tls listener in Kafka resource", expected = "Kafka with plain listener disabled and tls listener enabled is created"), - @Step(value = "Create Kafka topic and user", expected = "Kafka topic and tls user are created"), - @Step(value = "Configure and deploy Kafka clients", expected = "Kafka clients producer and consumer with tls are deployed"), - @Step(value = "Wait for clients to successfully send and receive messages", expected = "Clients successfully send and receive 
messages over tls"), - @Step(value = "Assert that the service discovery contains expected info", expected = "Service discovery matches expected info") + @Step(value = "Create Kafka node pool resources.", expected = "Persistent storage node pools are created."), + @Step(value = "Disable plain listener and enable tls listener in Kafka resource.", expected = "Kafka with plain listener disabled and tls listener enabled is created."), + @Step(value = "Create Kafka topic and user.", expected = "Kafka topic and tls user are created."), + @Step(value = "Configure and deploy Kafka clients.", expected = "Kafka clients producer and consumer with tls are deployed."), + @Step(value = "Wait for clients to successfully send and receive messages.", expected = "Clients successfully send and receive messages over tls."), + @Step(value = "Assert that the service discovery contains expected info.", expected = "Service discovery matches expected info.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -258,12 +258,12 @@ void testSendMessagesTlsAuthenticated() { @TestDoc( description = @Desc("Test sending messages over plain transport using scram sha auth."), steps = { - @Step(value = "Create Kafka brokers and controllers", expected = "Kafka brokers and controllers are created"), - @Step(value = "Enable Kafka with plain listener disabled and scram sha auth", expected = "Kafka instance with scram sha auth is enabled on a specified listener"), - @Step(value = "Set up topic and user", expected = "Kafka topic and Kafka user are set up with scram sha auth credentials"), - @Step(value = "Check logs in broker pod for authentication", expected = "Logs show that scram sha authentication succeeded"), - @Step(value = "Send messages over plain transport using scram sha authentication", expected = "Messages are successfully sent over plain transport using scram sha auth"), - @Step(value = "Verify service discovery annotation", expected = "Service discovery annotation is checked and validated") + @Step(value = "Create Kafka brokers and controllers.", expected = "Kafka brokers and controllers are created."), + @Step(value = "Enable Kafka with plain listener disabled and scram sha auth.", expected = "Kafka instance with scram sha auth is enabled on a specified listener."), + @Step(value = "Set up topic and user.", expected = "Kafka topic and Kafka user are set up with scram sha auth credentials."), + @Step(value = "Check logs in broker pod for authentication.", expected = "Logs show that scram sha authentication succeeded."), + @Step(value = "Send messages over plain transport using scram sha authentication.", expected = "Messages are successfully sent over plain transport using scram sha auth."), + @Step(value = "Verify service discovery annotation.", expected = "Service discovery annotation is checked and validated.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -335,12 +335,12 @@ void testSendMessagesPlainScramSha() { @TestDoc( description = @Desc("Test sending messages over TLS transport using SCRAM-SHA authentication."), steps = { - @Step(value = "Create resources for Kafka node pools", expected = "Kafka node pools are created"), - @Step(value = "Create Kafka cluster with SCRAM-SHA-512 authentication", expected = "Kafka cluster is created with SCRAM-SHA authentication"), - @Step(value = "Create Kafka topic and user", expected = "Kafka topic and user are created"), - @Step(value = "Transmit messages over TLS using SCRAM-SHA", expected = "Messages are successfully transmitted"), - @Step(value = "Check if 
generated password has the expected length", expected = "Password length is as expected"), - @Step(value = "Verify Kafka service discovery annotation", expected = "Service discovery annotation is as expected") + @Step(value = "Create resources for Kafka node pools.", expected = "Kafka node pools are created."), + @Step(value = "Create Kafka cluster with SCRAM-SHA-512 authentication.", expected = "Kafka cluster is created with SCRAM-SHA authentication."), + @Step(value = "Create Kafka topic and user.", expected = "Kafka topic and user are created."), + @Step(value = "Transmit messages over TLS using SCRAM-SHA.", expected = "Messages are successfully transmitted."), + @Step(value = "Check if generated password has the expected length.", expected = "Password length is as expected."), + @Step(value = "Verify Kafka service discovery annotation.", expected = "Service discovery annotation is as expected.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -417,10 +417,10 @@ void testSendMessagesTlsScramSha() { @TestDoc( description = @Desc("Test custom listener configured with scram SHA authentication and TLS."), steps = { - @Step(value = "Create a Kafka cluster with broker and controller node pools", expected = "Kafka cluster is created with node pools"), - @Step(value = "Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication", expected = "Kafka cluster with custom listener is ready"), - @Step(value = "Create a Kafka topic and SCRAM-SHA user", expected = "Kafka topic and user are created"), - @Step(value = "Transmit messages over TLS using SCRAM-SHA authentication", expected = "Messages are transmitted successfully") + @Step(value = "Create a Kafka cluster with broker and controller node pools.", expected = "Kafka cluster is created with node pools."), + @Step(value = "Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication.", expected = "Kafka cluster with custom listener is ready."), + @Step(value = "Create a Kafka topic and SCRAM-SHA user.", expected = "Kafka topic and user are created."), + @Step(value = "Transmit messages over TLS using SCRAM-SHA authentication.", expected = "Messages are transmitted successfully.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -477,11 +477,11 @@ void testSendMessagesCustomListenerTlsScramSha() { @TestDoc( description = @Desc("Test checking the functionality of Kafka cluster with NodePort external listener configurations."), steps = { - @Step(value = "Create resource with Kafka broker pool and controller pool", expected = "Resources with Kafka pools are created successfully"), - @Step(value = "Create Kafka cluster with NodePort and TLS listeners", expected = "Kafka cluster is set up with the specified listeners"), - @Step(value = "Create ExternalKafkaClient and verify message production and consumption", expected = "Messages are produced and consumed successfully"), - @Step(value = "Check Kafka status for proper listener addresses", expected = "Listener addresses in Kafka status are validated successfully"), - @Step(value = "Check ClusterRoleBinding annotations and labels in Kafka cluster", expected = "Annotations and labels match the expected values") + @Step(value = "Create resource with Kafka broker pool and controller pool.", expected = "Resources with Kafka pools are created successfully."), + @Step(value = "Create Kafka cluster with NodePort and TLS listeners.", expected = "Kafka cluster is set up with the specified listeners."), + @Step(value = "Create ExternalKafkaClient and verify 
message production and consumption.", expected = "Messages are produced and consumed successfully."), + @Step(value = "Check Kafka status for proper listener addresses.", expected = "Listener addresses in Kafka status are validated successfully."), + @Step(value = "Check ClusterRoleBinding annotations and labels in Kafka cluster.", expected = "Annotations and labels match the expected values.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -672,7 +672,7 @@ void testOverrideNodePortConfiguration() { @TestDoc( description = @Desc("Test the NodePort TLS functionality for Kafka brokers in a Kubernetes environment."), steps = { - @Step(value = "Create Kafka broker and controller node pools", expected = "Broker and controller node pools are created"), + @Step(value = "Create Kafka broker and controller node pools.", expected = "Broker and controller node pools are created"), @Step(value = "Deploy Kafka cluster with NodePort listener and TLS enabled", expected = "Kafka cluster is deployed with NodePort listener and TLS"), @Step(value = "Create a Kafka topic", expected = "Kafka topic is created"), @Step(value = "Create a Kafka user with TLS authentication", expected = "Kafka user with TLS authentication is created"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java index ddcc257a801..3c8084c63e6 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java @@ -74,7 +74,7 @@ public class MultipleListenersST extends AbstractST { @TestDoc( description = @Desc("Test verifying the functionality of using multiple NodePorts in a Kafka cluster within the same namespace."), steps = { - @Step(value = "Execute listener tests with NodePort configuration", expected = "Listener tests run without issues using NodePort") + @Step(value = "Execute listener tests with NodePort configuration.", expected = "Listener tests run without issues using NodePort.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -89,7 +89,7 @@ void testMultipleNodePorts() { @TestDoc( description = @Desc("Test to verify the usage of more than one Kafka cluster within a single namespace."), steps = { - @Step(value = "Run the internal Kafka listeners test", expected = "Listeners test runs successfully on the specified cluster") + @Step(value = "Run the internal Kafka listeners test.", expected = "Listeners test runs successfully on the specified cluster.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -107,9 +107,9 @@ void testMultipleInternal() { @TestDoc( description = @Desc("Test verifying the combination of internal and external Kafka listeners."), steps = { - @Step(value = "Check if the environment supports cluster-wide NodePort rights", expected = "Test is skipped if the environment is not suitable"), - @Step(value = "Retrieve and combine internal and NodePort listeners", expected = "Listeners are successfully retrieved and combined"), - @Step(value = "Run listeners test with combined listeners", expected = "Listeners test is executed successfully") + @Step(value = "Check if the environment supports cluster-wide NodePort rights.", expected = "Test is skipped if the environment is not suitable."), + @Step(value = "Retrieve and combine internal and NodePort listeners.", expected = "Listeners are successfully retrieved and combined."), + @Step(value 
= "Run listeners test with combined listeners.", expected = "Listeners test is executed successfully.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -138,8 +138,8 @@ void testCombinationOfInternalAndExternalListeners() { @TestDoc( description = @Desc("Test verifying the behavior of multiple load balancers in a single namespace using more than one Kafka cluster."), steps = { - @Step(value = "Run listeners test with LOADBALANCER type", expected = "Listeners test executes successfully with load balancers"), - @Step(value = "Validate the results", expected = "Results match the expected outcomes for multiple load balancers") + @Step(value = "Run listeners test with LOADBALANCER type.", expected = "Listeners test executes successfully with load balancers."), + @Step(value = "Validate the results.", expected = "Results match the expected outcomes for multiple load balancers.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -157,8 +157,8 @@ void testMultipleLoadBalancers() { @TestDoc( description = @Desc("Test to verify the functionality of multiple Kafka route listeners in a single namespace."), steps = { - @Step(value = "Retrieve test cases for Kafka Listener Type ROUTE", expected = "Test cases for ROUTE are retrieved"), - @Step(value = "Run listener tests using the retrieved test cases and cluster name", expected = "Listener tests run successfully with no errors") + @Step(value = "Retrieve test cases for Kafka Listener Type ROUTE.", expected = "Test cases for ROUTE are retrieved."), + @Step(value = "Run listener tests using the retrieved test cases and cluster name.", expected = "Listener tests run successfully with no errors.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -176,10 +176,10 @@ void testMultipleRoutes() { @TestDoc( description = @Desc("Test ensuring that different types of external Kafka listeners (ROUTE and NODEPORT) work correctly when mixed."), steps = { - @Step(value = "Retrieve route listeners", expected = "Route listeners are retrieved from test cases"), - @Step(value = "Retrieve nodeport listeners", expected = "Nodeport listeners are retrieved from test cases"), - @Step(value = "Combine route and nodeport listeners", expected = "Multiple different listeners list is populated"), - @Step(value = "Run listeners test", expected = "Listeners test runs using the combined list") + @Step(value = "Retrieve route listeners.", expected = "Route listeners are retrieved from test cases."), + @Step(value = "Retrieve nodeport listeners.", expected = "Nodeport listeners are retrieved from test cases."), + @Step(value = "Combine route and nodeport listeners.", expected = "Multiple different listeners list is populated."), + @Step(value = "Run listeners test.", expected = "Listeners test runs using the combined list.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -207,9 +207,9 @@ void testMixtureOfExternalListeners() { @TestDoc( description = @Desc("Verifies the combination of every kind of Kafka listener: INTERNAL, NODEPORT, ROUTE, and LOADBALANCER."), steps = { - @Step(value = "Retrieve different types of Kafka listeners", expected = "Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved"), - @Step(value = "Combine all different listener lists", expected = "A combined list of all Kafka listener types is created"), - @Step(value = "Run listeners test with combined listener list", expected = "Listeners test runs with all types of Kafka listeners in the combined list") + @Step(value = "Retrieve different types of Kafka listeners.", 
expected = "Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved."), + @Step(value = "Combine all different listener lists.", expected = "A combined list of all Kafka listener types is created."), + @Step(value = "Run listeners test with combined listener list.", expected = "Listeners test runs with all types of Kafka listeners in the combined list.") }, labels = { @Label(value = TestDocsLabels.KAFKA) From fda56251784690656785774e9f1be6bb8d50fe2d Mon Sep 17 00:00:00 2001 From: see-quick Date: Thu, 3 Oct 2024 14:04:27 +0200 Subject: [PATCH 05/12] update Lukas review Signed-off-by: see-quick --- .../io.strimzi.systemtest.connect.ConnectST.md | 10 +++++----- .../io.strimzi.systemtest.kafka.ConfigProviderST.md | 2 +- .../io.strimzi.systemtest.kafka.KafkaNodePoolST.md | 10 +++++----- .../systemtests/io.strimzi.systemtest.kafka.KafkaST.md | 4 ++-- .../io.strimzi.systemtest.kafka.QuotasST.md | 4 ++-- .../io.strimzi.systemtest.kafka.TieredStorageST.md | 2 +- ...o.strimzi.systemtest.kafka.listeners.ListenersST.md | 10 +++++----- .../java/io/strimzi/systemtest/connect/ConnectST.java | 10 +++++----- .../io/strimzi/systemtest/kafka/ConfigProviderST.java | 2 +- .../io/strimzi/systemtest/kafka/KafkaNodePoolST.java | 10 +++++----- .../test/java/io/strimzi/systemtest/kafka/KafkaST.java | 4 ++-- .../java/io/strimzi/systemtest/kafka/QuotasST.java | 4 ++-- .../io/strimzi/systemtest/kafka/TieredStorageST.java | 2 +- .../systemtest/kafka/listeners/ListenersST.java | 10 +++++----- 14 files changed, 42 insertions(+), 42 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md index 474e5da864f..b1fc2fedd24 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md @@ -132,7 +132,7 @@ | - | - | - | | 1. | Create TestStorage instance | TestStorage instance is created | | 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | -| 3. | Create broker and controller node pools | Node pools are created and ready | +| 3. | Create broker and controller KafkaNodePools. | Node pools are created and ready | | 4. | Create Kafka cluster | Kafka cluster is created and operational | | 5. | Setup JVM options and resource requirements for Kafka Connect | Kafka Connect is configured with specified JVM options and resources | | 6. | Verify JVM options and resource requirements | JVM options and resource requests/limits are correctly applied to the Kafka Connect pod | @@ -279,7 +279,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize test storage and determine connect cluster name | Test storage and cluster name properly initialized | -| 2. | Create broker and controller node pools | Broker and controller node pools created successfully | +| 2. | Create broker and controller KafkaNodePools. | Broker and controller node pools created successfully | | 3. | Deploy Kafka cluster in ephemeral mode | Kafka cluster deployed successfully | | 4. | Create Kafka Connect cluster with default image | Kafka Connect cluster created with appropriate configuration | | 5. | Create and configure Kafka Connector | Kafka Connector deployed and configured with correct settings | @@ -322,7 +322,7 @@ | Step | Action | Result | | - | - | - | | 1. 
| Create TestStorage instance | TestStorage instance is created with context | -| 2. | Create broker and controller node pools | Broker and Controller node pools are created | +| 2. | Create broker and controller KafkaNodePools. | Broker and Controller node pools are created | | 3. | Create ephemeral Kafka cluster | Kafka cluster with 3 replicas is created | | 4. | Create Kafka Connect with file plugin | Kafka Connect is created with 2 replicas and file plugin | | 5. | Create Kafka Connector | Kafka Connector is created with necessary configurations | @@ -346,7 +346,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize TestStorage and create namespace | Namespace and storage initialized | -| 2. | Create broker and controller node pools | Node pools created with 3 replicas. | +| 2. | Create broker and controller KafkaNodePools. | Node pools created with 3 replicas. | | 3. | Create ephemeral Kafka cluster | Kafka cluster created with 3 replicas. | | 4. | Create KafkaConnect resource with 2 replicas | KafkaConnect resource created with 2 replicas. | | 5. | Verify that KafkaConnect has 2 pods | 2 KafkaConnect pods are running. | @@ -368,7 +368,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize test storage | Instances created successfully | -| 2. | Create Kafka node pools (broker and controller) | Node pools created and ready | +| 2. | Create KafkaNodePool (broker and controller) | Node pools created and ready | | 3. | Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication | Kafka cluster deployed with listeners configured | | 4. | Create Kafka user with SCRAM-SHA-512 | User created successfully | | 5. | Deploy Kafka topic | Topic created successfully | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md index 88c173c237c..b9b1eba5107 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md @@ -22,7 +22,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create broker and controller pools. | Resources are created and are in ready state. | +| 1. | Create broker and controller KafkaNodePools. | Resources are created and are in ready state. | | 2. | Create Kafka cluster. | Kafka cluster is ready with 3 brokers. | | 3. | Create ConfigMap for connector configuration. | ConfigMap with connector configuration is created. | | 4. | Deploy Kafka Connect with external configuration. | Kafka Connect is deployed with proper configuration. | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md index b4749caf7de..b0c1bda07fa 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -1,12 +1,12 @@ # KafkaNodePoolST -**Description:** This test suite verifies various functionalities of Kafka node pools in a Kafka cluster. +**Description:** This test suite verifies various functionalities of KafkaNodePools in a Kafka cluster. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Ensure the environment is not using OLM or Helm and Kafka node pools are enabled. | Environment is validated. | +| 1. | Ensure the environment is not using OLM or Helm and KafkaNodePools are enabled. 
| Environment is validated. | | 2. | Install the default Cluster Operator. | Cluster operator is installed. | **Labels:** @@ -38,7 +38,7 @@ ## testKafkaNodePoolBrokerIdsManagementUsingAnnotations -**Description:** This test case verifies the management of broker IDs in Kafka node pools using annotations. +**Description:** This test case verifies the management of broker IDs in KafkaNodePools using annotations. **Steps:** @@ -56,7 +56,7 @@ ## testNodePoolsAdditionAndRemoval -**Description:** This test case verifies the possibility of adding and removing Kafka node pools into an existing Kafka cluster. +**Description:** This test case verifies the possibility of adding and removing KafkaNodePools into an existing Kafka cluster. **Steps:** @@ -76,7 +76,7 @@ ## testNodePoolsRolesChanging -**Description:** This test case verifies changing of roles in Kafka node pools. +**Description:** This test case verifies changing of roles in KafkaNodePools. **Steps:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md index beb780975be..97bed6d7fa5 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md @@ -41,7 +41,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize test storage with current context. | Test storage is initialized. | -| 2. | Create Kafka node pools | Kafka node pools are created and ready | +| 2. | Create KafkaNodePools | KafkaNodePools are created and ready | | 3. | Deploy Kafka with a non-existing version | Kafka deployment with non-supported version begins | | 4. | Log Kafka deployment process | Log entry for Kafka deployment is created | | 5. | Wait for Kafka to not be ready | Kafka is not ready as expected | @@ -179,7 +179,7 @@ ## testRemoveComponentsFromEntityOperator -**Description:** his test case verifies the correct deployment of the Entity Operator, including both the User Operator and Topic Operator. First, the Entity Operator is modified to exclude the User Operator. Then, it's restored to its default configuration, which includes the User Operator. Next, the Topic Operator is removed, followed by the User Operator, with the Topic Operator already excluded +**Description:** This test case verifies the correct deployment of the Entity Operator, including both the User Operator and Topic Operator. First, the Entity Operator is modified to exclude the User Operator. Then, it's restored to its default configuration, which includes the User Operator. Next, the Topic Operator is removed, followed by the User Operator, with the Topic Operator already excluded **Steps:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md index 737c583758c..1634c540467 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md @@ -23,7 +23,7 @@ | Step | Action | Result | | - | - | - | | 1. | Assume the cluster is not Minikube or MicroShift. | Cluster is appropriate for the test. | -| 2. | Create necessary resources for Kafka and nodes. | Resources are created and Kafka is set up with quotas plugin. | +| 2. | Create necessary resources for Kafka, including KafkaNodePools and persistent Kafka setup with quotas plugin. 
Configure producer and consumer quotas with specific byte rate limits, and define excluded principals to bypass the quotas. | Kafka and KafkaNodePools are created with quotas applied, and excluded principals are correctly configured. |
| 3. | Send messages without any user; observe quota enforcement. | Producer stops after reaching the minimum available bytes. |
| 4. | Check Kafka logs for quota enforcement message. | Kafka logs contain the expected quota enforcement message. |
| 5. | Send messages with excluded user and observe the behavior. | Messages are sent successfully without hitting the quota. |
@@ -43,7 +43,7 @@
| Step | Action | Result |
| - | - | - |
| 1. | Set excluded principal. | Principal is set. |
-| 2. | Create Kafka resources including node pools and persistent Kafka with quotas enabled. | Kafka resources are created successfully with quotas setup. |
+| 2. | Create Kafka resources including KafkaNodePools and persistent Kafka with quotas enabled. Configure producer and consumer byte rate limits and add excluded principals to bypass the quota enforcement. | Kafka resources are successfully created with proper quota configuration and excluded principals. |
| 3. | Create Kafka topic and user with SCRAM-SHA authentication. | Kafka topic and SCRAM-SHA user are created successfully. |
| 4. | Send messages with normal user. | Messages are sent and duration is measured. |
| 5. | Send messages with excluded user. | Messages are sent and duration is measured. |
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md
index ca645249051..8477ed90ad4 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md
@@ -7,7 +7,7 @@
| Step | Action | Result |
| - | - | - |
| 1. | Create test namespace. | Namespace is created. |
-| 2. | Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build). | Kafka image is built. |
+| 2. | Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build), and include the Aiven Tiered Storage plugin (tiered-storage-for-apache-kafka). | Kafka image is built with the Aiven Tiered Storage plugin integrated. |
| 3. | Deploy Minio in test namespace and init the client inside the Minio pod. | Minio is deployed and client is initialized. |
| 4. | Init bucket in Minio for purposes of these tests. | Bucket is initialized in Minio. |
| 5. | Deploy Cluster Operator. | Cluster Operator is deployed. |
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md
index e8147445080..bb821722cc1 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md
@@ -43,7 +43,7 @@
| - | - | - |
| 1. | Define non-existing certificate name. | Non-existing certificate name is defined. |
| 2. | Create a custom secret for Kafka with the defined certificate. | Custom secret created successfully. |
-| 3. | Create Kafka node pools resources. | Kafka node pools resources created. |
+| 3. | Create KafkaNodePool resources. | KafkaNodePool resources created. |
| 4. 
| Create Kafka cluster with ephemeral storage and the non-existing certificate. | Kafka cluster creation initiated. | | 5. | Wait for controller pods to be ready if in non-KRaft mode. | Controller pods are ready. | | 6. | Wait until Kafka status message indicates missing certificate. | Error message about missing certificate is found in Kafka status condition. | @@ -63,7 +63,7 @@ | - | - | - | | 1. | Define the non-existing certificate key. | The non-existing certificate key string is defined. | | 2. | Create a custom secret with a certificate for Kafka server. | Custom secret is created in the namespace. | -| 3. | Create broker and controller resources with node pools. | Resources are created and ready. | +| 3. | Create broker and controller KafkaNodePools. | Resources are created and ready. | | 4. | Deploy a Kafka cluster with a listener using the custom secret and non-existing key. | Deployment initiated without waiting for the resources to be ready. | | 5. | If not in KRaft mode, wait for controller pods to be ready. | Controller pods are in ready state (if applicable). | | 6. | Check Kafka status condition for custom certificate error message. | Error message indicating the missing custom certificate private key is present in Kafka status conditions. | @@ -341,7 +341,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create and configure Kafka node pools | Node pools for brokers and controllers are created | +| 1. | Create and configure KafkaNodePools | Node pools for brokers and controllers are created | | 2. | Create and configure Kafka cluster with TLS listener | Kafka cluster with TLS enabled LoadBalancer listener is created | | 3. | Create and configure Kafka user with TLS authentication | Kafka user with TLS authentication is created | | 4. | Wait for the LoadBalancer address to be reachable | LoadBalancer address becomes reachable | @@ -422,7 +422,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create necessary Kafka node pools | Kafka node pools are created and initialized | +| 1. | Create necessary KafkaNodePools | KafkaNodePools are created and initialized | | 2. | Create Kafka cluster with a listener using non-existing certificate | Kafka cluster resource is initialized with non-existing TLS certificate | | 3. | Wait for pods to be ready if not in KRaft mode | Pods are ready | | 4. | Wait for Kafka status condition message indicating the non-existing secret | Correct error message regarding the non-existing secret appears | @@ -535,7 +535,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create resources for Kafka node pools. | Kafka node pools are created. | +| 1. | Create resources for KafkaNodePools. | KafkaNodePools are created. | | 2. | Create Kafka cluster with SCRAM-SHA-512 authentication. | Kafka cluster is created with SCRAM-SHA authentication. | | 3. | Create Kafka topic and user. | Kafka topic and user are created. | | 4. | Transmit messages over TLS using SCRAM-SHA. | Messages are successfully transmitted. 
| diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 4058b924918..05eb0f263e8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -421,7 +421,7 @@ void testKafkaConnectAndConnectorFileSinkPlugin() { steps = { @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created"), @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), - @Step(value = "Create broker and controller node pools", expected = "Node pools are created and ready"), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Node pools are created and ready"), @Step(value = "Create Kafka cluster", expected = "Kafka cluster is created and operational"), @Step(value = "Setup JVM options and resource requirements for Kafka Connect", expected = "Kafka Connect is configured with specified JVM options and resources"), @Step(value = "Verify JVM options and resource requirements", expected = "JVM options and resource requests/limits are correctly applied to the Kafka Connect pod") @@ -617,7 +617,7 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { description = @Desc("Test validating Kafka Connect with TLS and SCRAM-SHA authentication along with associated resources setup and verification."), steps = { @Step(value = "Initialize test storage", expected = "Instances created successfully"), - @Step(value = "Create Kafka node pools (broker and controller)", expected = "Node pools created and ready"), + @Step(value = "Create KafkaNodePool (broker and controller)", expected = "Node pools created and ready"), @Step(value = "Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication", expected = "Kafka cluster deployed with listeners configured"), @Step(value = "Create Kafka user with SCRAM-SHA-512", expected = "User created successfully"), @Step(value = "Deploy Kafka topic", expected = "Topic created successfully"), @@ -934,7 +934,7 @@ void testCustomAndUpdatedValues() { description = @Desc("Test validating multi-node Kafka Connect cluster creation, connector deployment, and message processing."), steps = { @Step(value = "Initialize test storage and determine connect cluster name", expected = "Test storage and cluster name properly initialized"), - @Step(value = "Create broker and controller node pools", expected = "Broker and controller node pools created successfully"), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and controller node pools created successfully"), @Step(value = "Deploy Kafka cluster in ephemeral mode", expected = "Kafka cluster deployed successfully"), @Step(value = "Create Kafka Connect cluster with default image", expected = "Kafka Connect cluster created with appropriate configuration"), @Step(value = "Create and configure Kafka Connector", expected = "Kafka Connector deployed and configured with correct settings"), @@ -1187,7 +1187,7 @@ void testConnectScramShaAuthWithWeirdUserName() { description = @Desc("Test to validate scaling KafkaConnect without a connector to zero replicas."), steps = { @Step(value = "Initialize TestStorage and create namespace", expected = "Namespace and storage initialized"), - @Step(value = "Create broker and controller node pools", expected = "Node 
pools created with 3 replicas."), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Node pools created with 3 replicas."), @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster created with 3 replicas."), @Step(value = "Create KafkaConnect resource with 2 replicas", expected = "KafkaConnect resource created with 2 replicas."), @Step(value = "Verify that KafkaConnect has 2 pods", expected = "2 KafkaConnect pods are running."), @@ -1235,7 +1235,7 @@ void testScaleConnectWithoutConnectorToZero() { description = @Desc("Test scaling Kafka Connect with a connector to zero replicas."), steps = { @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created with context"), - @Step(value = "Create broker and controller node pools", expected = "Broker and Controller node pools are created"), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and Controller node pools are created"), @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster with 3 replicas is created"), @Step(value = "Create Kafka Connect with file plugin", expected = "Kafka Connect is created with 2 replicas and file plugin"), @Step(value = "Create Kafka Connector", expected = "Kafka Connector is created with necessary configurations"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index 15bc7f27615..8b79ad88565 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -61,7 +61,7 @@ public class ConfigProviderST extends AbstractST { @TestDoc( description = @Desc("Tests to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration."), steps = { - @Step(value = "Create broker and controller pools.", expected = "Resources are created and are in ready state."), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Resources are created and are in ready state."), @Step(value = "Create Kafka cluster.", expected = "Kafka cluster is ready with 3 brokers."), @Step(value = "Create ConfigMap for connector configuration.", expected = "ConfigMap with connector configuration is created."), @Step(value = "Deploy Kafka Connect with external configuration.", expected = "Kafka Connect is deployed with proper configuration."), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index bd0985e1ebc..388703926f0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -56,9 +56,9 @@ @Tag(REGRESSION) @SuiteDoc( - description = @Desc("This test suite verifies various functionalities of Kafka node pools in a Kafka cluster."), + description = @Desc("This test suite verifies various functionalities of KafkaNodePools in a Kafka cluster."), beforeTestSteps = { - @Step(value = "Ensure the environment is not using OLM or Helm and Kafka node pools are enabled.", expected = "Environment is validated."), + @Step(value = "Ensure the environment is not using OLM or Helm and KafkaNodePools are enabled.", expected = "Environment is validated."), @Step(value = "Install the default Cluster Operator.", expected = "Cluster operator is installed.") }, labels = { @@ 
-70,7 +70,7 @@ public class KafkaNodePoolST extends AbstractST { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies the management of broker IDs in Kafka node pools using annotations."), + description = @Desc("This test case verifies the management of broker IDs in KafkaNodePools using annotations."), steps = { @Step(value = "Deploy a Kafka instance with annotations to manage node pools and one initial node pool to hold topics and act as controller.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91."), @Step(value = "Deploy additional 2 node pools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]).", expected = "node pools are deployed, node pool A contains ID 4, node pool B contains IDs 6, 0."), @@ -174,7 +174,7 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies changing of roles in Kafka node pools."), + description = @Desc("This test case verifies changing of roles in KafkaNodePools."), steps = { @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial node pools is deployed."), @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic is created."), @@ -255,7 +255,7 @@ void testNodePoolsRolesChanging() { @ParallelNamespaceTest @TestDoc( - description = @Desc("This test case verifies the possibility of adding and removing Kafka node pools into an existing Kafka cluster."), + description = @Desc("This test case verifies the possibility of adding and removing KafkaNodePools into an existing Kafka cluster."), steps = { @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource."), @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index b2a8b52520b..35bcc53acc3 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -310,7 +310,7 @@ void testJvmAndResources() { @ParallelNamespaceTest @TestDoc( - description = @Desc("his test case verifies the correct deployment of the Entity Operator, including both the User Operator and Topic Operator. First, the Entity Operator is modified to exclude the User Operator. Then, it's restored to its default configuration, which includes the User Operator. Next, the Topic Operator is removed, followed by the User Operator, with the Topic Operator already excluded"), + description = @Desc("This test case verifies the correct deployment of the Entity Operator, including both the User Operator and Topic Operator. First, the Entity Operator is modified to exclude the User Operator. Then, it's restored to its default configuration, which includes the User Operator. 
Next, the Topic Operator is removed, followed by the User Operator, with the Topic Operator already excluded"), steps = { @Step(value = "Deploy Kafka with Entity Operator set.", expected = "Kafka is deployed, and Entity Operator consists of both Topic Operator and User Operator."), @Step(value = "Remove User Operator from the Kafka specification.", expected = "User Operator container is deleted."), @@ -1060,7 +1060,7 @@ void testReadOnlyRootFileSystem() { description = @Desc("Test to ensure that deploying Kafka with an unsupported version results in the expected error."), steps = { @Step(value = "Initialize test storage with current context.", expected = "Test storage is initialized."), - @Step(value = "Create Kafka node pools", expected = "Kafka node pools are created and ready"), + @Step(value = "Create KafkaNodePools", expected = "KafkaNodePools are created and ready"), @Step(value = "Deploy Kafka with a non-existing version", expected = "Kafka deployment with non-supported version begins"), @Step(value = "Log Kafka deployment process", expected = "Log entry for Kafka deployment is created"), @Step(value = "Wait for Kafka to not be ready", expected = "Kafka is not ready as expected"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java index 11027e0a01a..c8035a138f8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java @@ -60,7 +60,7 @@ public class QuotasST extends AbstractST { description = @Desc("Test to check Kafka Quotas Plugin for disk space."), steps = { @Step(value = "Assume the cluster is not Minikube or MicroShift.", expected = "Cluster is appropriate for the test."), - @Step(value = "Create necessary resources for Kafka and nodes.", expected = "Resources are created and Kafka is set up with quotas plugin."), + @Step(value = "Create necessary resources for Kafka, including KafkaNodePools and persistent Kafka setup with quotas plugin. Configure producer and consumer quotas with specific byte rate limits, and define excluded principals to bypass the quotas.", expected = "Kafka and KafkaNodePools are created with quotas applied, and excluded principals are correctly configured."), @Step(value = "Send messages without any user; observe quota enforcement.", expected = "Producer stops after reaching the minimum available bytes."), @Step(value = "Check Kafka logs for quota enforcement message.", expected = "Kafka logs contain the expected quota enforcement message."), @Step(value = "Send messages with excluded user and observe the behavior.", expected = "Messages are sent successfully without hitting the quota."), @@ -152,7 +152,7 @@ void testKafkaQuotasPluginIntegration() { description = @Desc("Test verifying bandwidth limitations with Kafka quotas plugin."), steps = { @Step(value = "Set excluded principal.", expected = "Principal is set."), - @Step(value = "Create Kafka resources including node pools and persistent Kafka with quotas enabled.", expected = "Kafka resources are created successfully with quotas setup."), + @Step(value = "Create Kafka resources including KafkaNodePools and persistent Kafka with quotas enabled. 
Configure producer and consumer byte rate limits and add excluded principals to bypass the quota enforcement.", expected = "Kafka resources are successfully created with proper quota configuration and excluded principals."),
@Step(value = "Create Kafka topic and user with SCRAM-SHA authentication.", expected = "Kafka topic and SCRAM-SHA user are created successfully."),
@Step(value = "Send messages with normal user.", expected = "Messages are sent and duration is measured."),
@Step(value = "Send messages with excluded user.", expected = "Messages are sent and duration is measured."),
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java
index 66fa28061ea..324ab20eaf6 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java
@@ -53,7 +53,7 @@
description = @Desc("This test suite covers scenarios for Tiered Storage integration implemented within Strimzi."),
beforeTestSteps = {
@Step(value = "Create test namespace.", expected = "Namespace is created."),
- @Step(value = "Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build).", expected = "Kafka image is built."),
+ @Step(value = "Build Kafka image based on passed parameters like image full name, base image, Dockerfile path (via Kaniko or OpenShift build), and include the Aiven Tiered Storage plugin (tiered-storage-for-apache-kafka).", expected = "Kafka image is built with the Aiven Tiered Storage plugin integrated."),
@Step(value = "Deploy Minio in test namespace and init the client inside the Minio pod.", expected = "Minio is deployed and client is initialized."),
@Step(value = "Init bucket in Minio for purposes of these tests.", expected = "Bucket is initialized in Minio."),
@Step(value = "Deploy Cluster Operator.", expected = "Cluster Operator is deployed.")
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
index 1a4816e8195..a75272ba5a5 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
@@ -335,7 +335,7 @@ void testSendMessagesPlainScramSha() {
@TestDoc(
description = @Desc("Test sending messages over TLS transport using SCRAM-SHA authentication."),
steps = {
- @Step(value = "Create resources for Kafka node pools.", expected = "Kafka node pools are created."),
+ @Step(value = "Create resources for KafkaNodePools.", expected = "KafkaNodePools are created."),
@Step(value = "Create Kafka cluster with SCRAM-SHA-512 authentication.", expected = "Kafka cluster is created with SCRAM-SHA authentication."),
@Step(value = "Create Kafka topic and user.", expected = "Kafka topic and user are created."),
@Step(value = "Transmit messages over TLS using SCRAM-SHA.", expected = "Messages are successfully transmitted."),
@@ -791,7 +791,7 @@ void testLoadBalancer() {
@TestDoc(
description = @Desc("Test validating the TLS connection through a Kafka LoadBalancer."),
steps = {
- @Step(value = "Create and configure Kafka node pools", expected = "Node pools for brokers and controllers are created"),
+ @Step(value = "Create and configure KafkaNodePools", expected = "Node pools for brokers and controllers are created"), 
@Step(value = "Create and configure Kafka cluster with TLS listener", expected = "Kafka cluster with TLS enabled LoadBalancer listener is created"), @Step(value = "Create and configure Kafka user with TLS authentication", expected = "Kafka user with TLS authentication is created"), @Step(value = "Wait for the LoadBalancer address to be reachable", expected = "LoadBalancer address becomes reachable"), @@ -2404,7 +2404,7 @@ void testCustomCertRouteAndTlsRollingUpdate() { @TestDoc( description = @Desc("Test for verifying non-existing custom certificate handling by creating necessary resources and ensuring correct error message check."), steps = { - @Step(value = "Create necessary Kafka node pools", expected = "Kafka node pools are created and initialized"), + @Step(value = "Create necessary KafkaNodePools", expected = "KafkaNodePools are created and initialized"), @Step(value = "Create Kafka cluster with a listener using non-existing certificate", expected = "Kafka cluster resource is initialized with non-existing TLS certificate"), @Step(value = "Wait for pods to be ready if not in KRaft mode", expected = "Pods are ready"), @Step(value = "Wait for Kafka status condition message indicating the non-existing secret", expected = "Correct error message regarding the non-existing secret appears") @@ -2456,7 +2456,7 @@ void testNonExistingCustomCertificate() { steps = { @Step(value = "Define non-existing certificate name.", expected = "Non-existing certificate name is defined."), @Step(value = "Create a custom secret for Kafka with the defined certificate.", expected = "Custom secret created successfully."), - @Step(value = "Create Kafka node pools resources.", expected = "Kafka node pools resources created."), + @Step(value = "Create KafkaNodePool resources.", expected = "KafkaNodePool resources created."), @Step(value = "Create Kafka cluster with ephemeral storage and the non-existing certificate.", expected = "Kafka cluster creation initiated."), @Step(value = "Wait for controller pods to be ready if in non-KRaft mode.", expected = "Controller pods are ready."), @Step(value = "Wait until Kafka status message indicates missing certificate.", expected = "Error message about missing certificate is found in Kafka status condition.") @@ -2512,7 +2512,7 @@ void testCertificateWithNonExistingDataCrt() { steps = { @Step(value = "Define the non-existing certificate key.", expected = "The non-existing certificate key string is defined."), @Step(value = "Create a custom secret with a certificate for Kafka server.", expected = "Custom secret is created in the namespace."), - @Step(value = "Create broker and controller resources with node pools.", expected = "Resources are created and ready."), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Resources are created and ready."), @Step(value = "Deploy a Kafka cluster with a listener using the custom secret and non-existing key.", expected = "Deployment initiated without waiting for the resources to be ready."), @Step(value = "If not in KRaft mode, wait for controller pods to be ready.", expected = "Controller pods are in ready state (if applicable)."), @Step(value = "Check Kafka status condition for custom certificate error message.", expected = "Error message indicating the missing custom certificate private key is present in Kafka status conditions.") From d9f15170eb74ae9c9947abbb7db1b4826ce411dd Mon Sep 17 00:00:00 2001 From: see-quick Date: Thu, 3 Oct 2024 15:13:10 +0200 Subject: [PATCH 06/12] updated node pools to KafkaNodePools 
Signed-off-by: see-quick --- ...io.strimzi.systemtest.connect.ConnectST.md | 36 +++++++++---------- ...ystemtest.cruisecontrol.CruiseControlST.md | 2 +- .../strimzi/systemtest/connect/ConnectST.java | 36 +++++++++---------- .../cruisecontrol/CruiseControlST.java | 2 +- .../rollingupdate/KafkaRollerST.java | 2 +- 5 files changed, 39 insertions(+), 39 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md index b1fc2fedd24..d9ba3a950e3 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md @@ -22,8 +22,8 @@ | Step | Action | Result | | - | - | - | -| 1. | Create resource with node pools | Node Pools created successfully | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 1. | Create resource with KafkaNodePools | KafkaNodePools created successfully | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Deploy Kafka cluster with SCRAM-SHA-512 authentication | Kafka cluster deployed with specified authentications | | 4. | Create Kafka Topic | Topic created successfully | | 5. | Create Kafka SCRAM-SHA-512 user with a weird username | User created successfully with SCRAM-SHA-512 credentials | @@ -46,7 +46,7 @@ | Step | Action | Result | | - | - | - | | 1. | Set up a name of username containing dots and 64 characters | | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Create Kafka broker, controller, topic, and Kafka user with the specified username | Resources are created with the expected configurations | | 4. | Setup Kafka Connect with the created Kafka instance and TLS authentication | Kafka Connect is set up with the expected configurations | | 5. | Check if the user can produce messages to Kafka | Messages are produced successfully | @@ -91,7 +91,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create and configure Kafka Connect with initial values | Kafka Connect is created and configured with initial environment variables and readiness/liveness probes | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Verify initial configuration and environment variables | Initial configuration and environment variables are as expected | | 4. | Update Kafka Connect configuration and environment variables | Kafka Connect configuration and environment variables are updated | | 5. | Verify updated configuration and environment variables | Updated configuration and environment variables are as expected | @@ -111,7 +111,7 @@ | - | - | - | | 1. | Initialize Test Storage. | Test storage instance is created with required context. | | 2. | Define expected configurations. 
| Configurations are loaded from properties file. | -| 3. | Create and wait for resources. | Kafka resources, including node pools and KafkaConnect instances, are created and become ready. | +| 3. | Create and wait for resources. | Kafka resources, including KafkaNodePools and KafkaConnect instances, are created and become ready. | | 4. | Annotate for manual rolling update. | KafkaConnect components are annotated for a manual rolling update. | | 5. | Perform and wait for rolling update. | KafkaConnect components roll and new pods are deployed. | | 6. | Kafka Connect pod. | Pod configurations and annotations are verified. | @@ -131,8 +131,8 @@ | Step | Action | Result | | - | - | - | | 1. | Create TestStorage instance | TestStorage instance is created | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | -| 3. | Create broker and controller KafkaNodePools. | Node pools are created and ready | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | +| 3. | Create broker and controller KafkaNodePools. | KafkaNodePools are created and ready | | 4. | Create Kafka cluster | Kafka cluster is created and operational | | 5. | Setup JVM options and resource requirements for Kafka Connect | Kafka Connect is configured with specified JVM options and resources | | 6. | Verify JVM options and resource requirements | JVM options and resource requests/limits are correctly applied to the Kafka Connect pod | @@ -151,7 +151,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create and configure test storage | Test storage is set up with necessary configurations. | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Create and wait for the broker and controller pools | Broker and controller pools are created and running. | | 4. | Deploy and configure Kafka Connect with File Sink Plugin | Kafka Connect with File Sink Plugin is deployed and configured. | | 5. | Deploy Network Policies for Kafka Connect | Network Policies are successfully deployed for Kafka Connect. | @@ -192,7 +192,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create TestStorage object instance | Instance of TestStorage is created | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Create resources for KafkaNodePools and KafkaCluster | Resources are created and ready | | 4. | Deploy Kafka Connect with file plugin | Kafka Connect is deployed with 1 initial replica | | 5. | Verify the initial replica count | Initial replica count is verified to be 1 | @@ -215,7 +215,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create object instance of TestStorage. | Instance of TestStorage is created. | -| 2. | Create node pools using resourceManager based on the configuration. | node pools for broker and controller are created or not based on configuration. | +| 2. 
| Create KafkaNodePools using resourceManager based on the configuration. | KafkaNodePools for broker and controller are created or not based on configuration. | | 3. | Deploy Kafka with SCRAM-SHA-512 listener | Kafka is deployed with the specified listener authentication | | 4. | Create KafkaUser with SCRAM-SHA authentication | KafkaUser is created using SCRAM-SHA authentication with the given credentials | | 5. | Create KafkaTopic | KafkaTopic is created | @@ -238,7 +238,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 1. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 2. | Create Kafka cluster with SCRAM-SHA authentication | Kafka cluster is created with SCRAM-SHA authentication enabled | | 3. | Create a Kafka user with SCRAM-SHA authentication | Kafka user with SCRAM-SHA authentication is created | | 4. | Deploy Kafka Connect with the created user credentials | Kafka Connect is deployed successfully | @@ -260,7 +260,7 @@ | - | - | - | | 1. | Create Secrets and ConfigMaps | Secrets and ConfigMaps are created successfully. | | 2. | Create Kafka environment | Kafka broker, Kafka Connect, and other resources are deployed successfully. | -| 3. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 3. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 4. | Bind Secrets and ConfigMaps to Kafka Connect | Secrets and ConfigMaps are bound to Kafka Connect as volumes and environment variables. | | 5. | Verify environment variables | Kafka Connect environment variables contain expected values from Secrets and ConfigMaps. | | 6. | Verify mounted volumes | Kafka Connect mounted volumes contain expected values from Secrets and ConfigMaps. | @@ -279,7 +279,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize test storage and determine connect cluster name | Test storage and cluster name properly initialized | -| 2. | Create broker and controller KafkaNodePools. | Broker and controller node pools created successfully | +| 2. | Create broker and controller KafkaNodePools. | Broker and controller KafkaNodePools created successfully | | 3. | Deploy Kafka cluster in ephemeral mode | Kafka cluster deployed successfully | | 4. | Create Kafka Connect cluster with default image | Kafka Connect cluster created with appropriate configuration | | 5. | Create and configure Kafka Connector | Kafka Connector deployed and configured with correct settings | @@ -301,7 +301,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize the test storage and create broker and controller pools | Broker and controller pools are created successfully | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Deploy Kafka, Kafka Connect and Kafka Connector resources | Kafka, Kafka Connect and Kafka Connector resources are deployed successfully | | 4. 
| Scale Kafka Connect subresource | Kafka Connect subresource is scaled successfully | | 5. | Verify Kafka Connect subresource scaling | Kafka Connect replicas and observed generation are as expected | @@ -322,7 +322,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create TestStorage instance | TestStorage instance is created with context | -| 2. | Create broker and controller KafkaNodePools. | Broker and Controller node pools are created | +| 2. | Create broker and controller KafkaNodePools. | Broker and Controller KafkaNodePools are created | | 3. | Create ephemeral Kafka cluster | Kafka cluster with 3 replicas is created | | 4. | Create Kafka Connect with file plugin | Kafka Connect is created with 2 replicas and file plugin | | 5. | Create Kafka Connector | Kafka Connector is created with necessary configurations | @@ -346,7 +346,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize TestStorage and create namespace | Namespace and storage initialized | -| 2. | Create broker and controller KafkaNodePools. | Node pools created with 3 replicas. | +| 2. | Create broker and controller KafkaNodePools. | KafkaNodePools created with 3 replicas. | | 3. | Create ephemeral Kafka cluster | Kafka cluster created with 3 replicas. | | 4. | Create KafkaConnect resource with 2 replicas | KafkaConnect resource created with 2 replicas. | | 5. | Verify that KafkaConnect has 2 pods | 2 KafkaConnect pods are running. | @@ -368,7 +368,7 @@ | Step | Action | Result | | - | - | - | | 1. | Initialize test storage | Instances created successfully | -| 2. | Create KafkaNodePool (broker and controller) | Node pools created and ready | +| 2. | Create KafkaNodePool (broker and controller) | KafkaNodePools created and ready | | 3. | Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication | Kafka cluster deployed with listeners configured | | 4. | Create Kafka user with SCRAM-SHA-512 | User created successfully | | 5. | Deploy Kafka topic | Topic created successfully | @@ -394,7 +394,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create test storage instance | Test storage instance is created | -| 2. | Create node pools using resourceManager based on the configuration | node pools for broker and controller are created or not based on configuration | +| 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | | 3. | Create resources for Kafka broker and Kafka Connect components | Resources are created and ready | | 4. | Configure Kafka broker with TLS listener and client authentication | Kafka broker is configured correctly | | 5. | Deploy Kafka user with TLS authentication | Kafka user is deployed with TLS authentication | diff --git a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md index 2ac720a5757..e8b038783ca 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md @@ -56,7 +56,7 @@ ## testCruiseControlDuringBrokerScaleUpAndDown -**Description:** Testing the behavior of Cruise Control during both scaling up and down of Kafka brokers using node pools. +**Description:** Testing the behavior of Cruise Control during both scaling up and down of Kafka brokers using KafkaNodePools. 
**Steps:** diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 05eb0f263e8..57823ef6b73 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -133,7 +133,7 @@ class ConnectST extends AbstractST { steps = { @Step(value = "Initialize Test Storage.", expected = "Test storage instance is created with required context."), @Step(value = "Define expected configurations.", expected = "Configurations are loaded from properties file."), - @Step(value = "Create and wait for resources.", expected = "Kafka resources, including node pools and KafkaConnect instances, are created and become ready."), + @Step(value = "Create and wait for resources.", expected = "Kafka resources, including KafkaNodePools and KafkaConnect instances, are created and become ready."), @Step(value = "Annotate for manual rolling update.", expected = "KafkaConnect components are annotated for a manual rolling update."), @Step(value = "Perform and wait for rolling update.", expected = "KafkaConnect components roll and new pods are deployed."), @Step(value = "Kafka Connect pod.", expected = "Pod configurations and annotations are verified."), @@ -262,7 +262,7 @@ void testKafkaConnectAndConnectorStateWithFileSinkPlugin() { description = @Desc("Test verifying Kafka Connect functionalities with Plain and SCRAM-SHA authentication."), steps = { @Step(value = "Create object instance of TestStorage.", expected = "Instance of TestStorage is created."), - @Step(value = "Create node pools using resourceManager based on the configuration.", expected = "node pools for broker and controller are created or not based on configuration."), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration.", expected = "KafkaNodePools for broker and controller are created or not based on configuration."), @Step(value = "Deploy Kafka with SCRAM-SHA-512 listener", expected = "Kafka is deployed with the specified listener authentication"), @Step(value = "Create KafkaUser with SCRAM-SHA authentication", expected = "KafkaUser is created using SCRAM-SHA authentication with the given credentials"), @Step(value = "Create KafkaTopic", expected = "KafkaTopic is created"), @@ -354,7 +354,7 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() { description = @Desc("Test the functionality of Kafka Connect with a File Sink Plugin in a parallel namespace setup."), steps = { @Step(value = "Create and configure test storage", expected = "Test storage is set up with necessary configurations."), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Create and wait for the broker and controller pools", expected = "Broker and controller pools are created and running."), @Step(value = "Deploy and configure Kafka Connect with File Sink Plugin", expected = "Kafka Connect with File Sink Plugin is deployed and configured."), @Step(value = "Deploy Network Policies for Kafka Connect", expected = "Network Policies are successfully deployed for Kafka Connect."), @@ -420,8 +420,8 @@ void 
testKafkaConnectAndConnectorFileSinkPlugin() { description = @Desc("Test ensuring the JVM options and resource requests/limits are correctly applied to Kafka Connect components."), steps = { @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created"), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), - @Step(value = "Create broker and controller KafkaNodePools.", expected = "Node pools are created and ready"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "KafkaNodePools are created and ready"), @Step(value = "Create Kafka cluster", expected = "Kafka cluster is created and operational"), @Step(value = "Setup JVM options and resource requirements for Kafka Connect", expected = "Kafka Connect is configured with specified JVM options and resources"), @Step(value = "Verify JVM options and resource requirements", expected = "JVM options and resource requests/limits are correctly applied to the Kafka Connect pod") @@ -473,7 +473,7 @@ void testJvmAndResources() { description = @Desc("Test verifying the scaling up and down functionality of Kafka Connect in a Kubernetes environment."), steps = { @Step(value = "Create TestStorage object instance", expected = "Instance of TestStorage is created"), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Create resources for KafkaNodePools and KafkaCluster", expected = "Resources are created and ready"), @Step(value = "Deploy Kafka Connect with file plugin", expected = "Kafka Connect is deployed with 1 initial replica"), @Step(value = "Verify the initial replica count", expected = "Initial replica count is verified to be 1"), @@ -525,7 +525,7 @@ void testKafkaConnectScaleUpScaleDown() { description = @Desc("This test verifies that Kafka Connect works with TLS and TLS client authentication."), steps = { @Step(value = "Create test storage instance", expected = "Test storage instance is created"), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Create resources for Kafka broker and Kafka Connect components", expected = "Resources are created and ready"), @Step(value = "Configure Kafka broker with TLS listener and client authentication", expected = "Kafka broker is configured correctly"), @Step(value = "Deploy Kafka user with TLS authentication", expected = "Kafka user is deployed with TLS authentication"), @@ -617,7 +617,7 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { description = @Desc("Test validating Kafka Connect with TLS and SCRAM-SHA authentication along with associated resources setup and 
verification."), steps = { @Step(value = "Initialize test storage", expected = "Instances created successfully"), - @Step(value = "Create KafkaNodePool (broker and controller)", expected = "Node pools created and ready"), + @Step(value = "Create KafkaNodePool (broker and controller)", expected = "KafkaNodePools created and ready"), @Step(value = "Deploy Kafka cluster with TLS and SCRAM-SHA-512 authentication", expected = "Kafka cluster deployed with listeners configured"), @Step(value = "Create Kafka user with SCRAM-SHA-512", expected = "User created successfully"), @Step(value = "Deploy Kafka topic", expected = "Topic created successfully"), @@ -820,7 +820,7 @@ void testConnectorTaskAutoRestart() { description = @Desc("Test that verifies custom and updated environment variables and readiness/liveness probes for Kafka Connect."), steps = { @Step(value = "Create and configure Kafka Connect with initial values", expected = "Kafka Connect is created and configured with initial environment variables and readiness/liveness probes"), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Verify initial configuration and environment variables", expected = "Initial configuration and environment variables are as expected"), @Step(value = "Update Kafka Connect configuration and environment variables", expected = "Kafka Connect configuration and environment variables are updated"), @Step(value = "Verify updated configuration and environment variables", expected = "Updated configuration and environment variables are as expected") @@ -934,7 +934,7 @@ void testCustomAndUpdatedValues() { description = @Desc("Test validating multi-node Kafka Connect cluster creation, connector deployment, and message processing."), steps = { @Step(value = "Initialize test storage and determine connect cluster name", expected = "Test storage and cluster name properly initialized"), - @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and controller node pools created successfully"), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and controller KafkaNodePools created successfully"), @Step(value = "Deploy Kafka cluster in ephemeral mode", expected = "Kafka cluster deployed successfully"), @Step(value = "Create Kafka Connect cluster with default image", expected = "Kafka Connect cluster created with appropriate configuration"), @Step(value = "Create and configure Kafka Connector", expected = "Kafka Connector deployed and configured with correct settings"), @@ -1004,7 +1004,7 @@ void testMultiNodeKafkaConnectWithConnectorCreation() { description = @Desc("Test verifying Kafka connect TLS authentication with a username containing unusual characters."), steps = { @Step(value = "Set up a name of username containing dots and 64 characters", expected = ""), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Create Kafka 
broker, controller, topic, and Kafka user with the specified username", expected = "Resources are created with the expected configurations"), @Step(value = "Setup Kafka Connect with the created Kafka instance and TLS authentication", expected = "Kafka Connect is set up with the expected configurations"), @Step(value = "Check if the user can produce messages to Kafka", expected = "Messages are produced successfully"), @@ -1093,8 +1093,8 @@ void testConnectTlsAuthWithWeirdUserName() { @TestDoc( description = @Desc("Test verifying that Kafka Connect can authenticate with SCRAM-SHA-512 using a username with special characters and length exceeding typical constraints."), steps = { - @Step(value = "Create resource with node pools", expected = "Node Pools created successfully"), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create resource with KafkaNodePools", expected = "KafkaNodePools created successfully"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Deploy Kafka cluster with SCRAM-SHA-512 authentication", expected = "Kafka cluster deployed with specified authentications"), @Step(value = "Create Kafka Topic", expected = "Topic created successfully"), @Step(value = "Create Kafka SCRAM-SHA-512 user with a weird username", expected = "User created successfully with SCRAM-SHA-512 credentials"), @@ -1187,7 +1187,7 @@ void testConnectScramShaAuthWithWeirdUserName() { description = @Desc("Test to validate scaling KafkaConnect without a connector to zero replicas."), steps = { @Step(value = "Initialize TestStorage and create namespace", expected = "Namespace and storage initialized"), - @Step(value = "Create broker and controller KafkaNodePools.", expected = "Node pools created with 3 replicas."), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "KafkaNodePools created with 3 replicas."), @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster created with 3 replicas."), @Step(value = "Create KafkaConnect resource with 2 replicas", expected = "KafkaConnect resource created with 2 replicas."), @Step(value = "Verify that KafkaConnect has 2 pods", expected = "2 KafkaConnect pods are running."), @@ -1235,7 +1235,7 @@ void testScaleConnectWithoutConnectorToZero() { description = @Desc("Test scaling Kafka Connect with a connector to zero replicas."), steps = { @Step(value = "Create TestStorage instance", expected = "TestStorage instance is created with context"), - @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and Controller node pools are created"), + @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and Controller KafkaNodePools are created"), @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster with 3 replicas is created"), @Step(value = "Create Kafka Connect with file plugin", expected = "Kafka Connect is created with 2 replicas and file plugin"), @Step(value = "Create Kafka Connector", expected = "Kafka Connector is created with necessary configurations"), @@ -1303,7 +1303,7 @@ void testScaleConnectWithConnectorToZero() { description = @Desc("This test verifies the scaling functionality of Kafka Connect and Kafka Connector subresources."), steps = { @Step(value = 
"Initialize the test storage and create broker and controller pools", expected = "Broker and controller pools are created successfully"), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Deploy Kafka, Kafka Connect and Kafka Connector resources", expected = "Kafka, Kafka Connect and Kafka Connector resources are deployed successfully"), @Step(value = "Scale Kafka Connect subresource", expected = "Kafka Connect subresource is scaled successfully"), @Step(value = "Verify Kafka Connect subresource scaling", expected = "Kafka Connect replicas and observed generation are as expected"), @@ -1390,7 +1390,7 @@ void testScaleConnectAndConnectorSubresource() { steps = { @Step(value = "Create Secrets and ConfigMaps", expected = "Secrets and ConfigMaps are created successfully."), @Step(value = "Create Kafka environment", expected = "Kafka broker, Kafka Connect, and other resources are deployed successfully."), - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Bind Secrets and ConfigMaps to Kafka Connect", expected = "Secrets and ConfigMaps are bound to Kafka Connect as volumes and environment variables."), @Step(value = "Verify environment variables", expected = "Kafka Connect environment variables contain expected values from Secrets and ConfigMaps."), @Step(value = "Verify mounted volumes", expected = "Kafka Connect mounted volumes contain expected values from Secrets and ConfigMaps.") @@ -1612,7 +1612,7 @@ void testMountingSecretAndConfigMapAsVolumesAndEnvVars() { @TestDoc( description = @Desc("Verifies Kafka Connect functionality when SCRAM-SHA authentication password is changed and the component is rolled."), steps = { - @Step(value = "Create node pools using resourceManager based on the configuration", expected = "node pools for broker and controller are created or not based on configuration"), + @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on configuration"), @Step(value = "Create Kafka cluster with SCRAM-SHA authentication", expected = "Kafka cluster is created with SCRAM-SHA authentication enabled"), @Step(value = "Create a Kafka user with SCRAM-SHA authentication", expected = "Kafka user with SCRAM-SHA authentication is created"), @Step(value = "Deploy Kafka Connect with the created user credentials", expected = "Kafka Connect is deployed successfully"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index 2cb18195ff2..c44ab213e4e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -503,7 +503,7 @@ void testCruiseControlIntraBrokerBalancing() { 
@IsolatedTest @MixedRoleNotSupported("Scaling a Kafka Node Pool with mixed roles is not supported yet") @TestDoc( - description = @Desc("Testing the behavior of Cruise Control during both scaling up and down of Kafka brokers using node pools."), + description = @Desc("Testing the behavior of Cruise Control during both scaling up and down of Kafka brokers using KafkaNodePools."), steps = { @Step(value = "Create broker and controller KafkaNodePools", expected = "Both KafkaNodePools are successfully created"), @Step(value = "Create initial Kafka cluster setup with Cruise Control and topic", expected = "Kafka cluster, topic, and scraper pod are created successfully"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java index 74090a0743f..b95cdacd0e7 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java @@ -391,7 +391,7 @@ void testKafkaPodPendingDueToRack() { /** * @description This test case verifies the rolling update behavior of Kafka controller nodes under specific conditions. - * It focuses on ensuring that changes in Kafka configuration and node pool properties affect only the intended node pools, + * It focuses on ensuring that changes in Kafka configuration and node pool properties affect only the intended KafkaNodePools, * particularly the controller nodes, while leaving others like broker nodes unaffected. * * @steps From 9af2a8729fde2b80e5a2a828b125ef325ba2ef2e Mon Sep 17 00:00:00 2001 From: see-quick Date: Thu, 3 Oct 2024 16:03:08 +0200 Subject: [PATCH 07/12] make it more consistent Signed-off-by: see-quick --- .../io.strimzi.systemtest.kafka.KafkaNodePoolST.md | 4 ++-- .../io.strimzi.systemtest.kafka.listeners.ListenersST.md | 2 +- .../io/strimzi/systemtest/cruisecontrol/CruiseControlST.java | 2 +- .../java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java | 4 ++-- .../io/strimzi/systemtest/kafka/listeners/ListenersST.java | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md index b0c1bda07fa..72a8f5732c6 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -85,10 +85,10 @@ | 1. | Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified. | Kafka instance with initial node pools is deployed. | | 2. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present. | KafkaTopic is created. | | 3. | Annotate one of node pools to perform manual Rolling Update. | Rolling Update started. | -| 4. | Change role of Kafka Node Pool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update. | +| 4. | Change role of KafkaNodePool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update. | | 5. | Original Rolling Update finishes successfully. | Rolling Update is completed. | | 6. | Delete previously created KafkaTopic. | KafkaTopic is deleted and Node Pool role change is initiated. | -| 7. 
| Change role of Kafka Node Pool from controller only to mixed role. | Kafka Node Pool changes role to mixed role. | +| 7. | Change role of KafkaNodePool from controller only to mixed role. | KafkaNodePool changes role to mixed role. | | 8. | Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present. | Messages are produced and consumed successfully. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md index bb821722cc1..1b51d19c5f8 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md @@ -515,7 +515,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka node pool resources. | Persistent storage node pools are created. | +| 1. | Create KafkaNodePool resources. | Persistent storage node pools are created. | | 2. | Disable plain listener and enable tls listener in Kafka resource. | Kafka with plain listener disabled and tls listener enabled is created. | | 3. | Create Kafka topic and user. | Kafka topic and tls user are created. | | 4. | Configure and deploy Kafka clients. | Kafka clients producer and consumer with tls are deployed. | diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index c44ab213e4e..1cc4e18a79b 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -501,7 +501,7 @@ void testCruiseControlIntraBrokerBalancing() { } @IsolatedTest - @MixedRoleNotSupported("Scaling a Kafka Node Pool with mixed roles is not supported yet") + @MixedRoleNotSupported("Scaling a KafkaNodePool with mixed roles is not supported yet") @TestDoc( description = @Desc("Testing the behavior of Cruise Control during both scaling up and down of Kafka brokers using KafkaNodePools."), steps = { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 388703926f0..e1279d4ca8d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -179,10 +179,10 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial node pools is deployed."), @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic is created."), @Step(value = "Annotate one of node pools to perform manual Rolling Update.", expected = "Rolling Update started."), - @Step(value = "Change role of Kafka Node Pool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update."), + @Step(value = "Change role of KafkaNodePool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update."), @Step(value = "Original Rolling 
Update finishes successfully.", expected = "Rolling Update is completed."), @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and Node Pool role change is initiated."), - @Step(value = "Change role of Kafka Node Pool from controller only to mixed role.", expected = "Kafka Node Pool changes role to mixed role."), + @Step(value = "Change role of KafkaNodePool from controller only to mixed role.", expected = "KafkaNodePool changes role to mixed role."), @Step(value = "Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present.", expected = "Messages are produced and consumed successfully.") }, labels = { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index a75272ba5a5..8be2ff7d2b2 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -180,7 +180,7 @@ void testSendMessagesPlainAnonymous() { @TestDoc( description = @Desc("Test sending messages over tls transport using mutual tls auth."), steps = { - @Step(value = "Create Kafka node pool resources.", expected = "Persistent storage node pools are created."), + @Step(value = "Create KafkaNodePool resources.", expected = "Persistent storage node pools are created."), @Step(value = "Disable plain listener and enable tls listener in Kafka resource.", expected = "Kafka with plain listener disabled and tls listener enabled is created."), @Step(value = "Create Kafka topic and user.", expected = "Kafka topic and tls user are created."), @Step(value = "Configure and deploy Kafka clients.", expected = "Kafka clients producer and consumer with tls are deployed."), From 68db5bf4103611b4c2f76479a29f1302f761c89f Mon Sep 17 00:00:00 2001 From: see-quick Date: Fri, 4 Oct 2024 10:34:45 +0200 Subject: [PATCH 08/12] review from Jakub more consistency added Signed-off-by: see-quick # Conflicts: # systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java --- ...io.strimzi.systemtest.connect.ConnectST.md | 26 +++---- ...isecontrol.CruiseControlConfigurationST.md | 2 +- ...rimzi.systemtest.kafka.ConfigProviderST.md | 8 +-- ...trimzi.systemtest.kafka.KafkaNodePoolST.md | 60 +++++++++------- .../io.strimzi.systemtest.kafka.KafkaST.md | 12 ++-- ...trimzi.systemtest.kafka.TieredStorageST.md | 2 +- ...afka.dynamicconfiguration.DynamicConfST.md | 4 +- ...ynamicconfiguration.DynamicConfSharedST.md | 20 +++++- ....systemtest.kafka.listeners.ListenersST.md | 10 +-- ...est.kafka.listeners.MultipleListenersST.md | 28 ++++---- development-docs/systemtests/labels/kafka.md | 1 + .../io/strimzi/systemtest/Environment.java | 2 +- .../annotations/MixedRoleNotSupported.java | 2 +- .../strimzi/systemtest/connect/ConnectST.java | 26 +++---- .../CruiseControlConfigurationST.java | 2 +- .../systemtest/kafka/ConfigProviderST.java | 8 +-- .../systemtest/kafka/KafkaNodePoolST.java | 72 ++++++++++--------- .../io/strimzi/systemtest/kafka/KafkaST.java | 12 ++-- .../systemtest/kafka/TieredStorageST.java | 2 +- .../dynamicconfiguration/DynamicConfST.java | 4 +- .../DynamicConfSharedST.java | 15 +++- .../kafka/listeners/ListenersST.java | 10 +-- .../kafka/listeners/MultipleListenersST.java | 28 ++++---- .../systemtest/log/LoggingChangeST.java | 8 +-- .../operators/MultipleClusterOperatorsST.java | 4 +- 
.../systemtest/operators/topic/TopicST.java | 12 ++-- .../rollingupdate/KafkaRollerST.java | 12 ++-- .../rollingupdate/RollingUpdateST.java | 8 +-- .../security/custom/CustomCaST.java | 12 ++-- .../systemtest/specific/SpecificST.java | 4 +- .../systemtest/upgrade/AbstractUpgradeST.java | 6 +- .../watcher/AbstractNamespaceST.java | 42 +++++------ .../watcher/MultipleNamespaceST.java | 2 +- 33 files changed, 257 insertions(+), 209 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md index d9ba3a950e3..4d454853d4c 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md @@ -28,7 +28,7 @@ | 4. | Create Kafka Topic | Topic created successfully | | 5. | Create Kafka SCRAM-SHA-512 user with a weird username | User created successfully with SCRAM-SHA-512 credentials | | 6. | Deploy Kafka Connect with SCRAM-SHA-512 authentication | Kafka Connect instance deployed and configured with user credentials | -| 7. | Deploy Kafka Connector | Kafka Connector deployed and configured successfully | +| 7. | Deploy KafkaConnector | KafkaConnector deployed and configured successfully | | 8. | Send messages using the configured client | Messages sent successfully | | 9. | Verify that connector receives messages | Messages consumed by the connector and written to the specified sink | @@ -66,9 +66,9 @@ | Step | Action | Result | | - | - | - | | 1. | Create test storage instance | Test storage instance is created | -| 2. | Create node pool resources | Node pool resources are created and waited for readiness | +| 2. | Create KafkaNodePool resources | KafkaNodePool resources are created and waited for readiness | | 3. | Create Kafka cluster | Kafka cluster is created and waited for readiness | -| 4. | Deploy EchoSink Kafka Connector with autor restart enabled | Kafka Connector is created with auto-restart enabled | +| 4. | Deploy EchoSink KafkaConnector with autor restart enabled | KafkaConnector is created with auto-restart enabled | | 5. | Send first batch of messages | First batch of messages is sent to the topic | | 6. | Ensure connection success for the first batch | Successfully produce the first batch of messages | | 7. | Send second batch of messages | Second batch of messages is sent to the topic | @@ -115,7 +115,7 @@ | 4. | Annotate for manual rolling update. | KafkaConnect components are annotated for a manual rolling update. | | 5. | Perform and wait for rolling update. | KafkaConnect components roll and new pods are deployed. | | 6. | Kafka Connect pod. | Pod configurations and annotations are verified. | -| 7. | Kafka Connectors. | Various Kafka Connect resource labels and configurations are verified to ensure correct deployment. | +| 7. | KafkaConnectors. | Various Kafka Connect resource labels and configurations are verified to ensure correct deployment. | **Labels:** @@ -155,10 +155,10 @@ | 3. | Create and wait for the broker and controller pools | Broker and controller pools are created and running. | | 4. | Deploy and configure Kafka Connect with File Sink Plugin | Kafka Connect with File Sink Plugin is deployed and configured. | | 5. | Deploy Network Policies for Kafka Connect | Network Policies are successfully deployed for Kafka Connect. | -| 6. | Create and wait for Kafka Connector | Kafka Connector is created and running. | +| 6. 
| Create and wait for KafkaConnector | KafkaConnector is created and running. | | 7. | Deploy and configure scraper pod | Scraper pod is deployed and configured. | | 8. | Deploy and configure Kafka clients | Kafka clients are deployed and configured. | -| 9. | Execute assertions to verify the Kafka Connector configuration and status | Assertions confirm the Kafka Connector is successfully deployed, has the correct configuration, and is running. | +| 9. | Execute assertions to verify the KafkaConnector configuration and status | Assertions confirm the KafkaConnector is successfully deployed, has the correct configuration, and is running. | **Labels:** @@ -282,8 +282,8 @@ | 2. | Create broker and controller KafkaNodePools. | Broker and controller KafkaNodePools created successfully | | 3. | Deploy Kafka cluster in ephemeral mode | Kafka cluster deployed successfully | | 4. | Create Kafka Connect cluster with default image | Kafka Connect cluster created with appropriate configuration | -| 5. | Create and configure Kafka Connector | Kafka Connector deployed and configured with correct settings | -| 6. | Verify the status of the Kafka Connector | Kafka Connector status retrieved and worker node identified | +| 5. | Create and configure KafkaConnector | KafkaConnector deployed and configured with correct settings | +| 6. | Verify the status of the KafkaConnector | KafkaConnector status retrieved and worker node identified | | 7. | Deploy Kafka clients for producer and consumer | Kafka producer and consumer clients deployed | | 8. | Verify that Kafka Connect writes messages to the specified file sink | Messages successfully written to the file sink by Kafka Connect | @@ -294,7 +294,7 @@ ## testScaleConnectAndConnectorSubresource -**Description:** This test verifies the scaling functionality of Kafka Connect and Kafka Connector subresources. +**Description:** This test verifies the scaling functionality of Kafka Connect and KafkaConnector subresources. **Steps:** @@ -302,7 +302,7 @@ | - | - | - | | 1. | Initialize the test storage and create broker and controller pools | Broker and controller pools are created successfully | | 2. | Create KafkaNodePools using resourceManager based on the configuration | KafkaNodePools for broker and controller are created or not based on configuration | -| 3. | Deploy Kafka, Kafka Connect and Kafka Connector resources | Kafka, Kafka Connect and Kafka Connector resources are deployed successfully | +| 3. | Deploy Kafka, Kafka Connect and KafkaConnector resources | Kafka, Kafka Connect and KafkaConnector resources are deployed successfully | | 4. | Scale Kafka Connect subresource | Kafka Connect subresource is scaled successfully | | 5. | Verify Kafka Connect subresource scaling | Kafka Connect replicas and observed generation are as expected | | 6. | Scale Kafka Connector subresource | Kafka Connector subresource task max is set correctly | @@ -325,12 +325,12 @@ | 2. | Create broker and controller KafkaNodePools. | Broker and Controller KafkaNodePools are created | | 3. | Create ephemeral Kafka cluster | Kafka cluster with 3 replicas is created | | 4. | Create Kafka Connect with file plugin | Kafka Connect is created with 2 replicas and file plugin | -| 5. | Create Kafka Connector | Kafka Connector is created with necessary configurations | +| 5. | Create KafkaConnector | KafkaConnector is created with necessary configurations | | 6. | Check Kafka Connect pods | There are 2 Kafka Connect pods | | 7. 
| Scale down Kafka Connect to zero | Kafka Connect is scaled down to 0 replicas | | 8. | Wait for Kafka Connect to be ready | Kafka Connect readiness is verified | -| 9. | Wait for Kafka Connector to not be ready | Kafka Connector readiness is verified | -| 10. | Verify conditions | Pod size is 0, Kafka Connect is ready, Kafka Connector is not ready due to zero replicas | +| 9. | Wait for KafkaConnector to not be ready | KafkaConnector readiness is verified | +| 10. | Verify conditions | Pod size is 0, Kafka Connect is ready, KafkaConnector is not ready due to zero replicas | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md index 99480194e64..e0e1b52444f 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Set up the Cluster Operator | Cluster Operator is installed and running | +| 1. | Set up the cluster operator | Cluster operator is installed and running | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md index b9b1eba5107..6de134d07f9 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md @@ -1,6 +1,6 @@ # ConfigProviderST -**Description:** This test verifies Kafka Connect using ConfigMap and EnvVar configuration. +**Description:** This test suite verifies Kafka Connect using ConfigMap and EnvVar configuration. **Before tests execution steps:** @@ -16,16 +16,16 @@ ## testConnectWithConnectorUsingConfigAndEnvProvider -**Description:** Tests to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration. +**Description:** Test to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration. **Steps:** | Step | Action | Result | | - | - | - | | 1. | Create broker and controller KafkaNodePools. | Resources are created and are in ready state. | -| 2. | Create Kafka cluster. | Kafka cluster is ready with 3 brokers. | +| 2. | Create Kafka cluster. | Kafka cluster is ready | | 3. | Create ConfigMap for connector configuration. | ConfigMap with connector configuration is created. | -| 4. | Deploy Kafka Connect with external configuration. | Kafka Connect is deployed with proper configuration. | +| 4. | Deploy Kafka Connect with external configuration from ConfigMap. | Kafka Connect is deployed with proper configuration. | | 5. | Create necessary Role and RoleBinding for connector. | Role and RoleBinding are created and applied. | | 6. | Deploy Kafka connector. | Kafka connector is successfully deployed. | | 7. | Deploy Kafka clients. | Kafka clients are deployed and ready. 
| diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md index 72a8f5732c6..c26f758c3d3 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -17,19 +17,19 @@ ## testKafkaManagementTransferToAndFromKafkaNodePool -**Description:** This test verifies Kafka Cluster migration to and from node pools, using the necessary Kafka and KafkaNodePool resources and annotations. +**Description:** This test verifies Kafka cluster migration to and from KafkaNodePools, using the necessary Kafka and KafkaNodePool resources and annotations. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy a Kafka cluster with the annotation to enable node pool management, and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | -| 2. | Modify KafkaNodePool by increasing number of Kafka Replicas. | Number of Kafka Pods is increased to match specification from KafkaNodePool. | -| 3. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | -| 4. | Disable KafkaNodePool management in the Kafka custom resource using the node pool annotation. | StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored. | -| 5. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | -| 6. | Enable node pool management in the Kafka custom resource using the node pool annotation. | New StrimziPodSet is created, pods are replaced , and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications. | -| 7. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | +| 1. | Deploy a Kafka cluster with the annotation to enable KafkaNodePool management, and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | +| 2. | Modify KafkaNodePool by increasing number of Kafka replicas. | Number of Kafka Pods is increased to match specification from KafkaNodePool. | +| 3. | Produce and consume messages in given Kafka cluster. | Clients can produce and consume messages. | +| 4. | Disable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation. | StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored. | +| 5. | Produce and consume messages in given Kafka cluster. | Clients can produce and consume messages. | +| 6. | Enable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation. | New StrimziPodSet is created, pods are replaced , and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications. | +| 7. | Produce and consume messages in given Kafka cluster. | Clients can produce and consume messages. | **Labels:** @@ -44,10 +44,10 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy a Kafka instance with annotations to manage node pools and one initial node pool to hold topics and act as controller. | Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91. | -| 2. 
| Deploy additional 2 node pools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]). | node pools are deployed, node pool A contains ID 4, node pool B contains IDs 6, 0. | -| 3. | Annotate node pool A 'next-node-ids' and node pool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp. | node pools are scaled, node pool A contains IDs 4, 20, 21, 1. node pool B contains ID 0. | -| 4. | Annotate node pool A 'remove-node-ids' and node pool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp. | node pools are scaled, node pool A contains IDs 1, 4. node pool B contains IDs 2, 3, 5. | +| 1. | Deploy a Kafka instance with annotations to manage KafkaNodePools and one initial KafkaNodePool to hold topics and act as controller. | Kafka instance is deployed according to Kafka and KafkaNodePool CustomResource, with IDs 90, 91. | +| 2. | Deploy additional 2 KafkaNodePools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]). | KafkaNodePools are deployed, KafkaNodePool A contains ID 4, KafkaNodePool B contains IDs 6, 0. | +| 3. | Annotate KafkaNodePool A 'next-node-ids' and KafkaNodePool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp. | KafkaNodePools are scaled, KafkaNodePool A contains IDs 4, 20, 21, 1. KafkaNodePool B contains ID 0. | +| 4. | Annotate KafkaNodePool A 'remove-node-ids' and KafkaNodePool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp. | KafkaNodePools are scaled, KafkaNodePool A contains IDs 1, 4. KafkaNodePool B contains IDs 2, 3, 5. | **Labels:** @@ -62,12 +62,18 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools. | Kafka instance is deployed according to Kafka and KafkaNodePool custom resource. | -| 2. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully, KafkaTopic created and cleaned as expected. | -| 3. | Add extra KafkaNodePool with broker role to the Kafka. | KafkaNodePool is deployed and ready. | -| 4. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully, KafkaTopic created and cleaned as expected. | -| 5. | Remove one kafkaNodePool with broker role. | KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready. | -| 6. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully, KafkaTopic created and cleaned as expected. | +| 1. | Deploy a Kafka instance with annotations to manage KafkaNodePools and 2 initial KafkaNodePools. | Kafka instance is deployed according to Kafka and KafkaNodePool CustomResource. | +| 2. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present. | KafkaTopic created. | +| 3. | Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully. | +| 4. | Remove KafkaTopic. | KafkaTopic is cleaned as expected. | +| 5. | Add extra KafkaNodePool with broker role to the Kafka. 
| KafkaNodePool is deployed and ready. | +| 6. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present. | KafkaTopic created. | +| 7. | Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully. | +| 8. | Remove KafkaTopic. | KafkaTopic is cleaned as expected. | +| 9. | Remove one KafkaNodePool with broker role. | KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready. | +| 10. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present. | KafkaTopic created. | +| 11. | Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully. | +| 12. | Remove KafkaTopic. | KafkaTopic is cleaned as expected. | **Labels:** @@ -82,14 +88,16 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified. | Kafka instance with initial node pools is deployed. | -| 2. | Create KafkaTopic with replica number requiring all Kafka Brokers to be present. | KafkaTopic is created. | -| 3. | Annotate one of node pools to perform manual Rolling Update. | Rolling Update started. | -| 4. | Change role of KafkaNodePool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update. | -| 5. | Original Rolling Update finishes successfully. | Rolling Update is completed. | -| 6. | Delete previously created KafkaTopic. | KafkaTopic is deleted and Node Pool role change is initiated. | -| 7. | Change role of KafkaNodePool from controller only to mixed role. | KafkaNodePool changes role to mixed role. | -| 8. | Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present. | Messages are produced and consumed successfully. | +| 1. | Deploy a Kafka instance with annotations to manage KafkaNodePools and 2 initial KafkaNodePools, both with mixed role, first one stable, second one which will be modified. | Kafka instance with initial KafkaNodePools is deployed. | +| 2. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present. | KafkaTopic created. | +| 3. | Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully. | +| 4. | Remove KafkaTopic. | KafkaTopic is cleaned as expected. | +| 5. | Annotate one of KafkaNodePools to perform manual rolling update. | Rolling update started. | +| 6. | Change role of KafkaNodePool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update. | +| 7. | Original rolling update finishes successfully. | Rolling update is completed. | +| 8. | Delete previously created KafkaTopic. | KafkaTopic is deleted and KafkaNodePool role change is initiated. | +| 9. | Change role of KafkaNodePool from controller only to mixed role. | KafkaNodePool changes role to mixed role. | +| 10. | Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present. | Messages are produced and consumed successfully. 
| **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md index 97bed6d7fa5..00fd4168661 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md @@ -1,12 +1,12 @@ # KafkaST -**Description:** Test suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters. +**Description:** Test suite containing Kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy Cluster Operator across all namespaces, with custom configuration. | Cluster Operator is deployed. | +| 1. | Deploy cluster operator across all namespaces, with custom configuration. | Cluster operator is deployed. | **Labels:** @@ -71,13 +71,13 @@ ## testKRaftMode -**Description:** This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft. +**Description:** This test case verifies basic working of Kafka Cluster managed by cluster operator with KRaft. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy Kafka annotated to enable KRaft (and additionally annotated to enable node pool management), and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | +| 1. | Deploy Kafka annotated to enable KRaft (and additionally annotated to enable KafkaNodePool management), and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | | 2. | Produce and consume messages in given Kafka Cluster. | Clients can produce and consume messages. | | 3. | Trigger manual Rolling Update. | Rolling update is triggered and completed shortly after. | @@ -96,9 +96,9 @@ | - | - | - | | 1. | Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of which are configured to delete their Persistent Volume Claims on Kafka cluster un-provision. | Kafka is deployed, volumes are labeled and linked to Pods correctly. | | 2. | Verify that labels in Persistent Volume Claims are set correctly. | Persistent Volume Claims contains expected labels and values. | -| 3. | Modify Kafka Custom Resource, specifically 'deleteClaim' property of its first Kafka Volume. | Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator. | +| 3. | Modify Kafka CustomResource, specifically 'deleteClaim' property of its first Kafka Volume. | Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by cluster operator. | | 4. | Delete Kafka cluster. | Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'deleteClaim' property set to true. | -| 5. | Verify remaining Persistent Volume Claims. | Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'deleteClaim' set to true is still present. | +| 5. | Verify remaining Persistent Volume Claims. | Persistent Volume Claim referenced by volume of formerly deleted Kafka CustomResource with property 'deleteClaim' set to true is still present. 
| **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md index 8477ed90ad4..2f23bec8f63 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.TieredStorageST.md @@ -27,7 +27,7 @@ | Step | Action | Result | | - | - | - | | 1. | Deploys KafkaNodePool resource with PV of size 10Gi. | KafkaNodePool resource is deployed successfully with specified configuration. | -| 2. | Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll. | Kafka resource is deployed successfully with Tiered Storage configuration. | +| 2. | Deploy Kafka CustomResource with Tiered Storage configuration pointing to Minio S3, using a built Kafka image. Reduce the `remote.log.manager.task.interval.ms` and `log.retention.check.interval.ms` to minimize delays during log uploads and deletions. | Kafka CustomResource is deployed successfully with optimized intervals to speed up log uploads and local log deletions. | | 3. | Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed to speed up the sync). | Topic is created successfully with Tiered Storage enabled and segment size of 10mb. | | 4. | Starts continuous producer to send data to Kafka. | Continuous producer starts sending data to Kafka. | | 5. | Wait until Minio size is not empty (contains data from Kafka). | Minio contains data from Kafka. | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md index c300b7fe58b..a05d8d913e8 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfST.md @@ -1,12 +1,12 @@ # DynamicConfST -**Description:** DynamicConfST is responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update. +**Description:** Responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update. **Before tests execution steps:** | Step | Action | Result | | - | - | - | -| 1. | Deploy the Cluster Operator. | Cluster Operator is installed successfully. | +| 1. | Deploy the cluster operator. | Cluster operator is installed successfully. |
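The DynamicConfST entry above and the DynamicConfSharedST entry that follows describe suites built around Kafka's dynamic broker configuration: selected broker properties can be changed at runtime, and the tests verify that such changes do not trigger a rolling update of the broker pods. As a minimal, hedged sketch of the underlying Kafka mechanism only (it is not part of this patch, and the system tests apply the change through the Kafka custom resource rather than the Admin API; the bootstrap address and the chosen property are illustrative assumptions), a dynamic update with the plain Kafka Admin client looks like this:

```java
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class DynamicBrokerConfigSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Illustrative bootstrap address; in the system tests this would be the
        // bootstrap service of the deployed Kafka cluster.
        props.put("bootstrap.servers", "my-cluster-kafka-bootstrap:9092");

        try (Admin admin = Admin.create(props)) {
            // An empty resource name targets the cluster-wide default for dynamic broker configs.
            ConfigResource brokers = new ConfigResource(ConfigResource.Type.BROKER, "");

            // log.cleaner.threads is one of the broker properties Kafka allows to be updated dynamically.
            AlterConfigOp setCleanerThreads = new AlterConfigOp(
                    new ConfigEntry("log.cleaner.threads", "2"), AlterConfigOp.OpType.SET);

            Map<ConfigResource, Collection<AlterConfigOp>> changes = Map.of(brokers, List.of(setCleanerThreads));

            // The new value takes effect at runtime, without restarting any broker.
            admin.incrementalAlterConfigs(changes).all().get();
        }
    }
}
```

In the DynamicConfST and DynamicConfSharedST suites the property is set in the Kafka custom resource instead, and the expectation is the same: the new value is reflected in the custom resource and on the running Kafka pods without any pod being rolled.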
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md index ab4dc47f2b3..d6ee5ef7d7d 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Run Cluster Operator installation. | Cluster Operator is installed. | +| 1. | Run cluster operator installation. | Cluster operator is installed. | | 2. | Deploy shared Kafka across all test cases. | Shared Kafka is deployed. | | 3. | Deploy scraper pod. | Scraper pod is deployed. | @@ -16,3 +16,21 @@ * [kafka](labels/kafka.md)
+ +## testDynConfiguration + +**Description:** This test dynamically selects and applies three Kafka dynamic configuration properties to verify that the changes do not trigger a rolling update in the Kafka cluster. It applies the configurations, waits for stability, and then verifies that the new configuration is applied both to the CustomResource (CR) and the running Kafka pods. + +**Steps:** + +| Step | Action | Result | +| - | - | - | +| 1. | Randomly choose three configuration properties for dynamic update. | Three configurations are selected without duplication. | +| 2. | Apply the chosen configuration properties to the Kafka CustomResource. | The configurations are applied successfully without triggering a rolling update. | +| 3. | Verify the applied configuration on both the Kafka CustomResource and the Kafka pods. | The applied configurations are correctly reflected in the Kafka CustomResource and the kafka pods. | + +**Labels:** + +* `dynamic-configuration` (description file doesn't exist) +* [kafka](labels/kafka.md) + diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md index 1b51d19c5f8..fb996159144 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md @@ -255,7 +255,7 @@ | Step | Action | Result | | - | - | - | | 1. | Create custom secret | Custom secret is created with the specified certificate and key | -| 2. | Create Kafka resources with node pools | Kafka brokers and controller pools are created and configured | +| 2. | Create Kafka resources with KafkaNodePools | Kafka brokers and controller pools are created and configured | | 3. | Create Kafka cluster with listeners | Kafka cluster is created with internal and load balancer listeners using the custom certificates | | 4. | Create TLS user | TLS user is created | | 5. | Verify produced and consumed messages via external client | Messages are successfully produced and consumed using the custom certificates | @@ -341,7 +341,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create and configure KafkaNodePools | Node pools for brokers and controllers are created | +| 1. | Create and configure KafkaNodePools | KafkaNodePools for brokers and controllers are created | | 2. | Create and configure Kafka cluster with TLS listener | Kafka cluster with TLS enabled LoadBalancer listener is created | | 3. | Create and configure Kafka user with TLS authentication | Kafka user with TLS authentication is created | | 4. | Wait for the LoadBalancer address to be reachable | LoadBalancer address becomes reachable | @@ -403,7 +403,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create Kafka broker and controller node pools. | Broker and controller node pools are created | +| 1. | Create Kafka broker and controller KafkaNodePools. | Broker and controller KafkaNodePools are created | | 2. | Deploy Kafka cluster with NodePort listener and TLS enabled | Kafka cluster is deployed with NodePort listener and TLS | | 3. | Create a Kafka topic | Kafka topic is created | | 4. | Create a Kafka user with TLS authentication | Kafka user with TLS authentication is created | @@ -459,7 +459,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create a Kafka cluster with broker and controller node pools. | Kafka cluster is created with node pools. | +| 1. 
| Create a Kafka cluster with broker and controller KafkaNodePools. | Kafka cluster is created with KafkaNodePools. | | 2. | Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication. | Kafka cluster with custom listener is ready. | | 3. | Create a Kafka topic and SCRAM-SHA user. | Kafka topic and user are created. | | 4. | Transmit messages over TLS using SCRAM-SHA authentication. | Messages are transmitted successfully. | @@ -515,7 +515,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create KafkaNodePool resources. | Persistent storage node pools are created. | +| 1. | Create KafkaNodePool resources. | Persistent storage KafkaNodePools are created. | | 2. | Disable plain listener and enable tls listener in Kafka resource. | Kafka with plain listener disabled and tls listener enabled is created. | | 3. | Create Kafka topic and user. | Kafka topic and tls user are created. | | 4. | Configure and deploy Kafka clients. | Kafka clients producer and consumer with tls are deployed. | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md index db06be6c04a..c4858ef96e3 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md @@ -1,6 +1,6 @@ # MultipleListenersST -**Description:** Test to verify the functionality of using multiple NodePorts in a Kafka cluster within the same namespace. +**Description:** Test to verify the functionality of using multiple NodePort listeners in a Kafka cluster within the same namespace. **Labels:** @@ -10,13 +10,13 @@ ## testCombinationOfEveryKindOfListener -**Description:** Verifies the combination of every kind of Kafka listener: INTERNAL, NODEPORT, ROUTE, and LOADBALANCER. +**Description:** Verifies the combination of every kind of Kafka listener: Internal, NodePort, Route, and LoadBalancer. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Retrieve different types of Kafka listeners. | Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved. | +| 1. | Retrieve different types of Kafka listeners. | Lists of Internal, NodePort, Route, and LoadBalancer listeners are retrieved. | | 2. | Combine all different listener lists. | A combined list of all Kafka listener types is created. | | 3. | Run listeners test with combined listener list. | Listeners test runs with all types of Kafka listeners in the combined list. | **Labels:** @@ -27,14 +27,14 @@ ## testCombinationOfInternalAndExternalListeners -**Description:** Test verifying the combination of internal and external Kafka listeners. +**Description:** Test verifying the combination of Internal and External Kafka listeners. **Steps:** | Step | Action | Result | | - | - | - | | 1. | Check if the environment supports cluster-wide NodePort rights. | Test is skipped if the environment is not suitable. | -| 2. | Retrieve and combine internal and NodePort listeners. | Listeners are successfully retrieved and combined. | +| 2. | Retrieve and combine Internal and NodePort listeners. | Listeners are successfully retrieved and combined. | | 3. | Run listeners test with combined listeners. | Listeners test is executed successfully. 
| **Labels:** @@ -44,15 +44,15 @@ ## testMixtureOfExternalListeners -**Description:** Test ensuring that different types of external Kafka listeners (ROUTE and NODEPORT) work correctly when mixed. +**Description:** Test ensuring that different types of External Kafka listeners (Route and NodePort) work correctly when mixed. **Steps:** | Step | Action | Result | | - | - | - | | 1. | Retrieve route listeners. | Route listeners are retrieved from test cases. | -| 2. | Retrieve nodeport listeners. | Nodeport listeners are retrieved from test cases. | -| 3. | Combine route and nodeport listeners. | Multiple different listeners list is populated. | +| 2. | Retrieve NodePort listeners. | NodePort listeners are retrieved from test cases. | +| 3. | Combine Route and NodePort listeners. | Multiple different listeners list is populated. | | 4. | Run listeners test. | Listeners test runs using the combined list. | **Labels:** @@ -68,7 +68,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Run the internal Kafka listeners test. | Listeners test runs successfully on the specified cluster. | +| 1. | Run the Internal Kafka listeners test. | Listeners test runs successfully on the specified cluster. | **Labels:** @@ -77,14 +77,14 @@ ## testMultipleLoadBalancers -**Description:** Test verifying the behavior of multiple load balancers in a single namespace using more than one Kafka cluster. +**Description:** Test verifying the behavior of multiple LoadBalancers in a single namespace using more than one Kafka cluster. **Steps:** | Step | Action | Result | | - | - | - | -| 1. | Run listeners test with LOADBALANCER type. | Listeners test executes successfully with load balancers. | -| 2. | Validate the results. | Results match the expected outcomes for multiple load balancers. | +| 1. | Run listeners test with LoadBalancer type. | Listeners test executes successfully with LoadBalancers. | +| 2. | Validate the results. | Results match the expected outcomes for multiple LoadBalancers. | **Labels:** @@ -93,7 +93,7 @@ ## testMultipleNodePorts -**Description:** Test verifying the functionality of using multiple NodePorts in a Kafka cluster within the same namespace. +**Description:** Test verifying the functionality of using multiple NodePort listeners in a Kafka cluster within the same namespace. **Steps:** @@ -114,7 +114,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Retrieve test cases for Kafka Listener Type ROUTE. | Test cases for ROUTE are retrieved. | +| 1. | Retrieve test cases for Kafka Listener Type Route. | Test cases for Route are retrieved. | | 2. | Run listener tests using the retrieved test cases and cluster name. | Listener tests run successfully with no errors. 
| **Labels:** diff --git a/development-docs/systemtests/labels/kafka.md b/development-docs/systemtests/labels/kafka.md index 28c2eebf013..c4edc300caa 100644 --- a/development-docs/systemtests/labels/kafka.md +++ b/development-docs/systemtests/labels/kafka.md @@ -11,6 +11,7 @@ These tests are crucial to ensure that Kafka clusters can handle production work - [testReadOnlyRootFileSystem](../io.strimzi.systemtest.kafka.KafkaST.md) - [testLabelsExistenceAndManipulation](../io.strimzi.systemtest.kafka.KafkaST.md) - [testSendMessagesCustomListenerTlsScramSha](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) +- [testDynConfiguration](../io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md) - [testKafkaJBODDeleteClaimsTrueFalse](../io.strimzi.systemtest.kafka.KafkaST.md) - [testCertificateWithNonExistingDataKey](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) - [testResizeJbodVolumes](../io.strimzi.systemtest.kafka.KafkaST.md) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/Environment.java b/systemtest/src/main/java/io/strimzi/systemtest/Environment.java index 6f887bacad3..759fe709f55 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/Environment.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/Environment.java @@ -145,7 +145,7 @@ public class Environment { public static final String STRIMZI_USE_KRAFT_IN_TESTS_ENV = "STRIMZI_USE_KRAFT_IN_TESTS"; /** - * Controls whether tests should run with Node Pools or not + * Controls whether tests should run with KafkaNodePools or not */ public static final String STRIMZI_USE_NODE_POOLS_IN_TESTS_ENV = "STRIMZI_USE_NODE_POOLS_IN_TESTS"; diff --git a/systemtest/src/main/java/io/strimzi/systemtest/annotations/MixedRoleNotSupported.java b/systemtest/src/main/java/io/strimzi/systemtest/annotations/MixedRoleNotSupported.java index 83d8f81220e..8bbbc36053d 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/annotations/MixedRoleNotSupported.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/annotations/MixedRoleNotSupported.java @@ -15,5 +15,5 @@ @Retention(RetentionPolicy.RUNTIME) @ExtendWith(MixedRoleNotSupportedCondition.class) public @interface MixedRoleNotSupported { - String value() default "Mixed role in Kafka Node Pools is not supported with configuration in this test case."; + String value() default "Mixed role in KafkaNodePools is not supported with configuration in this test case."; } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 57823ef6b73..8e514649707 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -137,7 +137,7 @@ class ConnectST extends AbstractST { @Step(value = "Annotate for manual rolling update.", expected = "KafkaConnect components are annotated for a manual rolling update."), @Step(value = "Perform and wait for rolling update.", expected = "KafkaConnect components roll and new pods are deployed."), @Step(value = "Kafka Connect pod.", expected = "Pod configurations and annotations are verified."), - @Step(value = "Kafka Connectors.", expected = "Various Kafka Connect resource labels and configurations are verified to ensure correct deployment.") + @Step(value = "KafkaConnectors.", expected = "Various Kafka Connect resource labels and configurations are verified to ensure correct deployment.") }, labels = { @Label(value = 
TestDocsLabels.CONNECT)
@@ -358,10 +358,10 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() {
@Step(value = "Create and wait for the broker and controller pools", expected = "Broker and controller pools are created and running."),
@Step(value = "Deploy and configure Kafka Connect with File Sink Plugin", expected = "Kafka Connect with File Sink Plugin is deployed and configured."),
@Step(value = "Deploy Network Policies for Kafka Connect", expected = "Network Policies are successfully deployed for Kafka Connect."),
- @Step(value = "Create and wait for Kafka Connector", expected = "Kafka Connector is created and running."),
+ @Step(value = "Create and wait for KafkaConnector", expected = "KafkaConnector is created and running."),
@Step(value = "Deploy and configure scraper pod", expected = "Scraper pod is deployed and configured."),
@Step(value = "Deploy and configure Kafka clients", expected = "Kafka clients are deployed and configured."),
- @Step(value = "Execute assertions to verify the Kafka Connector configuration and status", expected = "Assertions confirm the Kafka Connector is successfully deployed, has the correct configuration, and is running.")
+ @Step(value = "Execute assertions to verify the KafkaConnector configuration and status", expected = "Assertions confirm the KafkaConnector is successfully deployed, has the correct configuration, and is running.")
},
labels = {
@Label(value = TestDocsLabels.CONNECT)
@@ -709,9 +709,9 @@ void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication() {
description = @Desc("Test the automatic restart functionality of Kafka Connect tasks when they fail."),
steps = {
@Step(value = "Create test storage instance", expected = "Test storage instance is created"),
- @Step(value = "Create node pool resources", expected = "Node pool resources are created and waited for readiness"),
+ @Step(value = "Create KafkaNodePool resources", expected = "KafkaNodePool resources are created and waited for readiness"),
@Step(value = "Create Kafka cluster", expected = "Kafka cluster is created and waited for readiness"),
- @Step(value = "Deploy EchoSink Kafka Connector with autor restart enabled", expected = "Kafka Connector is created with auto-restart enabled"),
+ @Step(value = "Deploy EchoSink KafkaConnector with auto-restart enabled", expected = "KafkaConnector is created with auto-restart enabled"),
@Step(value = "Send first batch of messages", expected = "First batch of messages is sent to the topic"),
@Step(value = "Ensure connection success for the first batch", expected = "Successfully produce the first batch of messages"),
@Step(value = "Send second batch of messages", expected = "Second batch of messages is sent to the topic"),
@@ -937,8 +937,8 @@ void testCustomAndUpdatedValues() {
@Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and controller KafkaNodePools created successfully"),
@Step(value = "Deploy Kafka cluster in ephemeral mode", expected = "Kafka cluster deployed successfully"),
@Step(value = "Create Kafka Connect cluster with default image", expected = "Kafka Connect cluster created with appropriate configuration"),
- @Step(value = "Create and configure Kafka Connector", expected = "Kafka Connector deployed and configured with correct settings"),
- @Step(value = "Verify the status of the Kafka Connector", expected = "Kafka Connector status retrieved and worker node identified"),
+ @Step(value = "Create and configure KafkaConnector", expected = "KafkaConnector deployed and configured with correct
settings"), + @Step(value = "Verify the status of the KafkaConnector", expected = "KafkaConnector status retrieved and worker node identified"), @Step(value = "Deploy Kafka clients for producer and consumer", expected = "Kafka producer and consumer clients deployed"), @Step(value = "Verify that Kafka Connect writes messages to the specified file sink", expected = "Messages successfully written to the file sink by Kafka Connect") }, @@ -1099,7 +1099,7 @@ void testConnectTlsAuthWithWeirdUserName() { @Step(value = "Create Kafka Topic", expected = "Topic created successfully"), @Step(value = "Create Kafka SCRAM-SHA-512 user with a weird username", expected = "User created successfully with SCRAM-SHA-512 credentials"), @Step(value = "Deploy Kafka Connect with SCRAM-SHA-512 authentication", expected = "Kafka Connect instance deployed and configured with user credentials"), - @Step(value = "Deploy Kafka Connector", expected = "Kafka Connector deployed and configured successfully"), + @Step(value = "Deploy KafkaConnector", expected = "KafkaConnector deployed and configured successfully"), @Step(value = "Send messages using the configured client", expected = "Messages sent successfully"), @Step(value = "Verify that connector receives messages", expected = "Messages consumed by the connector and written to the specified sink") }, @@ -1238,12 +1238,12 @@ void testScaleConnectWithoutConnectorToZero() { @Step(value = "Create broker and controller KafkaNodePools.", expected = "Broker and Controller KafkaNodePools are created"), @Step(value = "Create ephemeral Kafka cluster", expected = "Kafka cluster with 3 replicas is created"), @Step(value = "Create Kafka Connect with file plugin", expected = "Kafka Connect is created with 2 replicas and file plugin"), - @Step(value = "Create Kafka Connector", expected = "Kafka Connector is created with necessary configurations"), + @Step(value = "Create KafkaConnector", expected = "KafkaConnector is created with necessary configurations"), @Step(value = "Check Kafka Connect pods", expected = "There are 2 Kafka Connect pods"), @Step(value = "Scale down Kafka Connect to zero", expected = "Kafka Connect is scaled down to 0 replicas"), @Step(value = "Wait for Kafka Connect to be ready", expected = "Kafka Connect readiness is verified"), - @Step(value = "Wait for Kafka Connector to not be ready", expected = "Kafka Connector readiness is verified"), - @Step(value = "Verify conditions", expected = "Pod size is 0, Kafka Connect is ready, Kafka Connector is not ready due to zero replicas") + @Step(value = "Wait for KafkaConnector to not be ready", expected = "KafkaConnector readiness is verified"), + @Step(value = "Verify conditions", expected = "Pod size is 0, Kafka Connect is ready, KafkaConnector is not ready due to zero replicas") }, labels = { @Label(value = TestDocsLabels.CONNECT) @@ -1300,11 +1300,11 @@ void testScaleConnectWithConnectorToZero() { @Tag(CONNECTOR_OPERATOR) @Tag(COMPONENT_SCALING) @TestDoc( - description = @Desc("This test verifies the scaling functionality of Kafka Connect and Kafka Connector subresources."), + description = @Desc("This test verifies the scaling functionality of Kafka Connect and KafkaConnector subresources."), steps = { @Step(value = "Initialize the test storage and create broker and controller pools", expected = "Broker and controller pools are created successfully"), @Step(value = "Create KafkaNodePools using resourceManager based on the configuration", expected = "KafkaNodePools for broker and controller are created or not based on 
configuration"), - @Step(value = "Deploy Kafka, Kafka Connect and Kafka Connector resources", expected = "Kafka, Kafka Connect and Kafka Connector resources are deployed successfully"), + @Step(value = "Deploy Kafka, Kafka Connect and KafkaConnector resources", expected = "Kafka, Kafka Connect and KafkaConnector resources are deployed successfully"), @Step(value = "Scale Kafka Connect subresource", expected = "Kafka Connect subresource is scaled successfully"), @Step(value = "Verify Kafka Connect subresource scaling", expected = "Kafka Connect replicas and observed generation are as expected"), @Step(value = "Scale Kafka Connector subresource", expected = "Kafka Connector subresource task max is set correctly"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java index 2e3a6f7e231..5e45a662e09 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java @@ -60,7 +60,7 @@ @SuiteDoc( description = @Desc("This test suite, verify configuration of the Cruise Control component."), beforeTestSteps = { - @Step(value = "Set up the Cluster Operator", expected = "Cluster Operator is installed and running") + @Step(value = "Set up the cluster operator", expected = "Cluster operator is installed and running") }, labels = { @Label(value = TestDocsLabels.CRUISE_CONTROL) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index 8b79ad88565..d8ba47f2369 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -45,7 +45,7 @@ @Tag(REGRESSION) @SuiteDoc( - description = @Desc("This test verifies Kafka Connect using ConfigMap and EnvVar configuration."), + description = @Desc("This test suite verifies Kafka Connect using ConfigMap and EnvVar configuration."), beforeTestSteps = { @Step(value = "Deploy cluster operator across all namespaces, with custom configuration.", expected = "Cluster operator is deployed.") }, @@ -59,12 +59,12 @@ public class ConfigProviderST extends AbstractST { @ParallelNamespaceTest @TestDoc( - description = @Desc("Tests to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration."), + description = @Desc("Test to ensure Kafka Connect functions correctly using ConfigMap and EnvVar configuration."), steps = { @Step(value = "Create broker and controller KafkaNodePools.", expected = "Resources are created and are in ready state."), - @Step(value = "Create Kafka cluster.", expected = "Kafka cluster is ready with 3 brokers."), + @Step(value = "Create Kafka cluster.", expected = "Kafka cluster is ready"), @Step(value = "Create ConfigMap for connector configuration.", expected = "ConfigMap with connector configuration is created."), - @Step(value = "Deploy Kafka Connect with external configuration.", expected = "Kafka Connect is deployed with proper configuration."), + @Step(value = "Deploy Kafka Connect with external configuration from ConfigMap.", expected = "Kafka Connect is deployed with proper configuration."), @Step(value = "Create necessary Role and RoleBinding for connector.", expected = "Role and RoleBinding are created and applied."), 
@Step(value = "Deploy Kafka connector.", expected = "Kafka connector is successfully deployed."), @Step(value = "Deploy Kafka clients.", expected = "Kafka clients are deployed and ready."), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index e1279d4ca8d..4b1ae3e6b48 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -72,10 +72,10 @@ public class KafkaNodePoolST extends AbstractST { @TestDoc( description = @Desc("This test case verifies the management of broker IDs in KafkaNodePools using annotations."), steps = { - @Step(value = "Deploy a Kafka instance with annotations to manage node pools and one initial node pool to hold topics and act as controller.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource, with IDs 90, 91."), - @Step(value = "Deploy additional 2 node pools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]).", expected = "node pools are deployed, node pool A contains ID 4, node pool B contains IDs 6, 0."), - @Step(value = "Annotate node pool A 'next-node-ids' and node pool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp.", expected = "node pools are scaled, node pool A contains IDs 4, 20, 21, 1. node pool B contains ID 0."), - @Step(value = "Annotate node pool A 'remove-node-ids' and node pool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp.", expected = "node pools are scaled, node pool A contains IDs 1, 4. node pool B contains IDs 2, 3, 5.") + @Step(value = "Deploy a Kafka instance with annotations to manage KafkaNodePools and one initial KafkaNodePool to hold topics and act as controller.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool CustomResource, with IDs 90, 91."), + @Step(value = "Deploy additional 2 KafkaNodePools (A,B) with 1 and 2 replicas, and preset 'next-node-ids' annotations holding resp. values ([4],[6]).", expected = "KafkaNodePools are deployed, KafkaNodePool A contains ID 4, KafkaNodePool B contains IDs 6, 0."), + @Step(value = "Annotate KafkaNodePool A 'next-node-ids' and KafkaNodePool B 'remove-node-ids' respectively ([20-21],[6,55]) afterward scale to 4 and 1 replicas resp.", expected = "KafkaNodePools are scaled, KafkaNodePool A contains IDs 4, 20, 21, 1. KafkaNodePool B contains ID 0."), + @Step(value = "Annotate KafkaNodePool A 'remove-node-ids' and KafkaNodePool B 'next-node-ids' respectively ([20],[1]) afterward scale to 2 and 6 replicas resp.", expected = "KafkaNodePools are scaled, KafkaNodePool A contains IDs 1, 4. 
KafkaNodePool B contains IDs 2, 3, 5.")
},
labels = {
@Label(value = TestDocsLabels.KAFKA)
@@ -108,7 +108,7 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() {
PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), nodePoolNameInitial), 2);
- LOGGER.info("Testing deployment of node pools with pre-configured annotation: {} is creating Brokers with correct IDs", Annotations.ANNO_STRIMZI_IO_NODE_POOLS);
+ LOGGER.info("Testing deployment of KafkaNodePools with pre-configured annotation: {} is creating Brokers with correct IDs", Annotations.ANNO_STRIMZI_IO_NODE_POOLS);
// Deploy NodePool A with only 1 replica and next ID 4, and NodePool B with 2 replica and next ID 6
resourceManager.createResourceWithWait(KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), nodePoolNameA, testStorage.getClusterName(), 1)
@@ -176,12 +176,14 @@
@TestDoc(
description = @Desc("This test case verifies changing of roles in KafkaNodePools."),
steps = {
- @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial node pools is deployed."),
- @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic is created."),
- @Step(value = "Annotate one of node pools to perform manual Rolling Update.", expected = "Rolling Update started."),
- @Step(value = "Change role ofKafkaNodePool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing Rolling Update."),
- @Step(value = "Original Rolling Update finishes successfully.", expected = "Rolling Update is completed."),
- @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and Node Pool role change is initiated."),
+ @Step(value = "Deploy a Kafka instance with annotations to manage KafkaNodePools and 2 initial KafkaNodePools, both with mixed role, first one stable, second one which will be modified.", expected = "Kafka instance with initial KafkaNodePools is deployed."),
+ @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic created."),
+ @Step(value = "Deploy clients and transmit messages.", expected = "Transmission of messages is finished successfully."),
+ @Step(value = "Remove KafkaTopic.", expected = "KafkaTopic is cleaned as expected."),
+ @Step(value = "Annotate one of the KafkaNodePools to perform manual rolling update.", expected = "Rolling update started."),
+ @Step(value = "Change role of KafkaNodePool from mixed to controller only role.", expected = "Role change is prevented due to existing KafkaTopic replicas and ongoing rolling update."),
+ @Step(value = "Original rolling update finishes successfully.", expected = "Rolling update is completed."),
+ @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and KafkaNodePool role change is initiated."),
@Step(value = "Change role ofKafkaNodePool from controller only to mixed role.", expected = "KafkaNodePool changes role to mixed role."),
@Step(value = "Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present.", expected = "Messages are produced and consumed successfully.")
},
@@ -199,7 +201,7 @@ void testNodePoolsRolesChanging() {
final LabelSelector volatilePoolLabelSelector = KafkaNodePoolResource.getLabelSelector(testStorage.getClusterName(), volatileRolePoolName, ProcessRoles.CONTROLLER);
- // Stable Node Pool for purpose of having at least 3 brokers and 3 controllers all the time.
+ // Stable KafkaNodePool for purpose of having at least 3 brokers and 3 controllers all the time.
resourceManager.createResourceWithWait(
NodePoolsConverter.convertNodePoolsIfNeeded(
KafkaNodePoolTemplates.brokerPool(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), testStorage.getClusterName(), 3).build(),
@@ -232,7 +234,7 @@ void testNodePoolsRolesChanging() {
LOGGER.info("Wait for warning message in Kafka {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName());
KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(testStorage.getNamespaceName(), testStorage.getClusterName(), ".*Reverting role change.*");
- LOGGER.info("Wait for (original) Rolling Update to finish successfully");
+ LOGGER.info("Wait for (original) rolling update to finish successfully");
volatilePoolPodsSnapshot = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), volatilePoolLabelSelector, 3, volatilePoolPodsSnapshot);
// remove topic which blocks role change (removal of broker role thus decreasing number of broker nodes available)
@@ -257,12 +259,18 @@
@TestDoc(
description = @Desc("This test case verifies the possibility of adding and removing KafkaNodePools into an existing Kafka cluster."),
steps = {
- @Step(value = "Deploy a Kafka instance with annotations to manage node pools and 2 initial node pools.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool custom resource."),
- @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."),
+ @Step(value = "Deploy a Kafka instance with annotations to manage KafkaNodePools and 2 initial KafkaNodePools.", expected = "Kafka instance is deployed according to Kafka and KafkaNodePool CustomResource."),
+ @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic created."),
+ @Step(value = "Deploy clients and transmit messages.", expected = "Transmission of messages is finished successfully."),
+ @Step(value = "Remove KafkaTopic.", expected = "KafkaTopic is cleaned as expected."),
@Step(value = "Add extra KafkaNodePool with broker role to the Kafka.", expected = "KafkaNodePool is deployed and ready."),
- @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected."),
- @Step(value = "Remove one kafkaNodePool with broker role.", expected = "KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready."),
- @Step(value = "Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present, Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully, KafkaTopic created and cleaned as expected.")
+ @Step(value = "Create KafkaTopic with replica number requiring all Kafka Brokers to be present.", expected = "KafkaTopic created."),
+ @Step(value = "Deploy clients and transmit messages.", expected = "Transmission of messages is finished successfully."),
+ @Step(value = "Remove KafkaTopic.", expected = "KafkaTopic is cleaned as expected."),
+ @Step(value = "Remove one KafkaNodePool with broker role.", expected = "KafkaNodePool is removed, Pods are deleted, but other pods in Kafka are stable and ready."),
+ @Step(value = "Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present.", expected = "KafkaTopic created."),
+ @Step(value = "Deploy clients and transmit messages.", expected = "Transmission of messages is finished successfully."),
+ @Step(value = "Remove KafkaTopic.", expected = "KafkaTopic is cleaned as expected.")
},
labels = {
@Label(value = TestDocsLabels.KAFKA)
@@ -270,7 +278,7 @@
)
void testNodePoolsAdditionAndRemoval() {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
- // node pools name convention is 'A' for all roles (: if possible i.e. based on feature gate) 'B' for broker roles.
+ // KafkaNodePool naming convention is 'A' for all roles (if possible, i.e. based on feature gate), 'B' for broker roles.
final String poolAName = testStorage.getBrokerPoolName() + "-a";
final String poolB1Name = testStorage.getBrokerPoolName() + "-b1";
final String poolB2NameAdded = testStorage.getBrokerPoolName() + "-b2-added";
@@ -304,7 +312,7 @@ void testNodePoolsAdditionAndRemoval() {
KafkaNodePoolUtils.waitForKafkaNodePoolPodsReady(testStorage, poolB2NameAdded, ProcessRoles.BROKER, brokerNodePoolReplicaCount);
- // replica count of this KafkaTopic will require that new brokers were correctly added into Kafka Cluster
+ // replica count of this KafkaTopic will require that new brokers were correctly added into Kafka cluster
transmitMessagesWithNewTopicAndClean(testStorage, 5);
LOGGER.info("Delete KafkaNodePool: {}/{} and wait for Kafka pods stability", testStorage.getNamespaceName(), poolB1Name);
@@ -317,15 +325,15 @@
@ParallelNamespaceTest
@TestDoc(
- description = @Desc("This test verifies Kafka Cluster migration to and from node pools, using the necessary Kafka and KafkaNodePool resources and annotations."),
+ description = @Desc("This test verifies Kafka cluster migration to and from KafkaNodePools, using the necessary Kafka and KafkaNodePool resources and annotations."),
steps = {
- @Step(value = "Deploy a Kafka cluster with the annotation to enable node pool management, and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."),
- @Step(value = "Modify KafkaNodePool by increasing number of Kafka Replicas.", expected = "Number of Kafka Pods is increased to match specification from KafkaNodePool."),
- @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."),
- @Step(value = "Disable KafkaNodePool management in the Kafka custom resource using the node pool annotation.", expected = " StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored."),
- @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can
produce and consume messages."), - @Step(value = "Enable node pool management in the Kafka custom resource using the node pool annotation.", expected = "New StrimziPodSet is created, pods are replaced , and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications."), - @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages.") + @Step(value = "Deploy a Kafka cluster with the annotation to enable KafkaNodePool management, and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."), + @Step(value = "Modify KafkaNodePool by increasing number of Kafka replicas.", expected = "Number of Kafka Pods is increased to match specification from KafkaNodePool."), + @Step(value = "Produce and consume messages in given Kafka cluster.", expected = "Clients can produce and consume messages."), + @Step(value = "Disable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation.", expected = " StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored."), + @Step(value = "Produce and consume messages in given Kafka cluster.", expected = "Clients can produce and consume messages."), + @Step(value = "Enable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation.", expected = "New StrimziPodSet is created, pods are replaced , and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications."), + @Step(value = "Produce and consume messages in given Kafka cluster.", expected = "Clients can produce and consume messages.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -337,7 +345,7 @@ void testKafkaManagementTransferToAndFromKafkaNodePool() { final int nodePoolIncreasedKafkaReplicaCount = 5; final String kafkaNodePoolName = "kafka"; - LOGGER.info("Deploying Kafka Cluster: {}/{} controlled by KafkaNodePool: {}", testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaNodePoolName); + LOGGER.info("Deploying Kafka cluster: {}/{} controlled by KafkaNodePool: {}", testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaNodePoolName); final Kafka kafkaCr = KafkaTemplates.kafkaPersistentNodePools(testStorage.getNamespaceName(), testStorage.getClusterName(), originalKafkaReplicaCount, 3).build(); @@ -377,10 +385,10 @@ void testKafkaManagementTransferToAndFromKafkaNodePool() { ); ClientUtils.waitForInstantClientSuccess(testStorage); - LOGGER.info("Disable KafkaNodePool in Kafka Cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); + LOGGER.info("Disable KafkaNodePool in Kafka cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { kafka.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "disabled"); - // because Kafka CR with node pools is missing .spec.kafka.replicas and .spec.kafka.storage, we need to + // because Kafka CR with KafkaNodePools is missing .spec.kafka.replicas and .spec.kafka.storage, we need to // set those here kafka.getSpec().getKafka().setReplicas(originalKafkaReplicaCount); kafka.getSpec().getKafka().setStorage(new PersistentClaimStorageBuilder() @@ -406,7 +414,7 @@ void 
testKafkaManagementTransferToAndFromKafkaNodePool() { ); ClientUtils.waitForInstantClientSuccess(testStorage); - LOGGER.info("Enable KafkaNodePool in Kafka Cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); + LOGGER.info("Enable KafkaNodePool in Kafka cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { kafka.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled"); kafka.getSpec().getKafka().setReplicas(null); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index 35bcc53acc3..de676697ca1 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -113,9 +113,9 @@ @Tag(REGRESSION) @SuppressWarnings("checkstyle:ClassFanOutComplexity") @SuiteDoc( - description = @Desc("Test suite containing kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters."), + description = @Desc("Test suite containing Kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters."), beforeTestSteps = { - @Step(value = "Deploy Cluster Operator across all namespaces, with custom configuration.", expected = "Cluster Operator is deployed.") + @Step(value = "Deploy cluster operator across all namespaces, with custom configuration.", expected = "Cluster operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -399,9 +399,9 @@ void testRemoveComponentsFromEntityOperator() { steps = { @Step(value = "Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of which are configured to delete their Persistent Volume Claims on Kafka cluster un-provision.", expected = "Kafka is deployed, volumes are labeled and linked to Pods correctly."), @Step(value = "Verify that labels in Persistent Volume Claims are set correctly.", expected = "Persistent Volume Claims contains expected labels and values."), - @Step(value = "Modify Kafka Custom Resource, specifically 'deleteClaim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator."), + @Step(value = "Modify Kafka CustomResource, specifically 'deleteClaim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by cluster operator."), @Step(value = "Delete Kafka cluster.", expected = "Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'deleteClaim' property set to true."), - @Step(value = "Verify remaining Persistent Volume Claims.", expected = "Persistent Volume Claim referenced by volume of formerly deleted Kafka Custom Resource with property 'deleteClaim' set to true is still present.") + @Step(value = "Verify remaining Persistent Volume Claims.", expected = "Persistent Volume Claim referenced by volume of formerly deleted Kafka CustomResource with property 'deleteClaim' set to true is still present.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -1223,9 +1223,9 @@ void testResizeJbodVolumes() { @ParallelNamespaceTest() @TestDoc( - 
description = @Desc("This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft."), + description = @Desc("This test case verifies basic working of Kafka Cluster managed by cluster operator with KRaft."), steps = { - @Step(value = "Deploy Kafka annotated to enable KRaft (and additionally annotated to enable node pool management), and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."), + @Step(value = "Deploy Kafka annotated to enable KRaft (and additionally annotated to enable KafkaNodePool management), and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."), @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), @Step(value = "Trigger manual Rolling Update.", expected = "Rolling update is triggered and completed shortly after.") }, diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java index 324ab20eaf6..ba2e713079e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java @@ -76,7 +76,7 @@ public class TieredStorageST extends AbstractST { description = @Desc("This testcase is focused on testing of Tiered Storage integration implemented within Strimzi. The tests use Aiven Tiered Storage plugin (tiered-storage-for-apache-kafka)."), steps = { @Step(value = "Deploys KafkaNodePool resource with PV of size 10Gi.", expected = "KafkaNodePool resource is deployed successfully with specified configuration."), - @Step(value = "Deploys Kafka resource with configuration of Tiered Storage for Aiven plugin, pointing to Minio S3, and with image built in beforeAll.", expected = "Kafka resource is deployed successfully with Tiered Storage configuration."), + @Step(value = "Deploy Kafka CustomResource with Tiered Storage configuration pointing to Minio S3, using a built Kafka image. 
Reduce the `remote.log.manager.task.interval.ms` and `log.retention.check.interval.ms` to minimize delays during log uploads and deletions.", expected = "Kafka CustomResource is deployed successfully with optimized intervals to speed up log uploads and local log deletions."), @Step(value = "Creates topic with enabled Tiered Storage sync with size of segments set to 10mb (this is needed to speed up the sync).", expected = "Topic is created successfully with Tiered Storage enabled and segment size of 10mb."), @Step(value = "Starts continuous producer to send data to Kafka.", expected = "Continuous producer starts sending data to Kafka."), @Step(value = "Wait until Minio size is not empty (contains data from Kafka).", expected = "Minio contains data from Kafka.") diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java index 7053e7e105d..c0ff45ba4ba 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java @@ -63,9 +63,9 @@ @Tag(REGRESSION) @Tag(DYNAMIC_CONFIGURATION) @SuiteDoc( - description = @Desc("DynamicConfST is responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update."), + description = @Desc("Responsible for verifying that changes in dynamic Kafka configuration do not trigger a rolling update."), beforeTestSteps = { - @Step(value = "Deploy the Cluster Operator.", expected = "Cluster Operator is installed successfully.") + @Step(value = "Deploy the cluster operator.", expected = "Cluster operator is installed successfully.") } ) public class DynamicConfST extends AbstractST { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java index 5b88f2da459..f1b6e0cad5e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java @@ -8,6 +8,7 @@ import io.skodjob.annotations.Label; import io.skodjob.annotations.Step; import io.skodjob.annotations.SuiteDoc; +import io.skodjob.annotations.TestDoc; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.kafka.config.model.ConfigModel; import io.strimzi.kafka.config.model.Type; @@ -50,7 +51,7 @@ @SuiteDoc( description = @Desc("DynamicConfigurationSharedST is responsible for verifying that changing dynamic Kafka configuration will not trigger a rolling update. 
Shared -> for each test case we use the same Kafka resource configuration."),
beforeTestSteps = {
- @Step(value = "Run Cluster Operator installation.", expected = "Cluster Operator is installed."),
+ @Step(value = "Run cluster operator installation.", expected = "Cluster operator is installed."),
@Step(value = "Deploy shared Kafka across all test cases.", expected = "Shared Kafka is deployed."),
@Step(value = "Deploy scraper pod.", expected = "Scraper pod is deployed.")
},
@@ -68,6 +69,18 @@ public class DynamicConfSharedST extends AbstractST {
private String scraperPodName;
private static Random rng = new Random();
+ @TestDoc(
+ description = @Desc("This test dynamically selects and applies three Kafka dynamic configuration properties to verify that the changes do not trigger a rolling update in the Kafka cluster. It applies the configurations, waits for stability, and then verifies that the new configuration is applied both to the CustomResource (CR) and the running Kafka pods."),
+ steps = {
+ @Step(value = "Randomly choose three configuration properties for dynamic update.", expected = "Three configurations are selected without duplication."),
+ @Step(value = "Apply the chosen configuration properties to the Kafka CustomResource.", expected = "The configurations are applied successfully without triggering a rolling update."),
+ @Step(value = "Verify the applied configuration on both the Kafka CustomResource and the Kafka pods.", expected = "The applied configurations are correctly reflected in the Kafka CustomResource and the Kafka pods.")
+ },
+ labels = {
+ @Label(value = TestDocsLabels.DYNAMIC_CONFIGURATION),
+ @Label(value = TestDocsLabels.KAFKA)
+ }
+ )
@TestFactory
Iterator testDynConfiguration() {
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
index 8be2ff7d2b2..9a468c0c0d2 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
@@ -180,7 +180,7 @@ void testSendMessagesPlainAnonymous() {
@TestDoc(
description = @Desc("Test sending messages over tls transport using mutual tls auth."),
steps = {
- @Step(value = "CreateKafkaNodePool resources.", expected = "Persistent storage node pools are created."),
+ @Step(value = "Create KafkaNodePool resources.", expected = "Persistent storage KafkaNodePools are created."),
@Step(value = "Disable plain listener and enable tls listener in Kafka resource.", expected = "Kafka with plain listener disabled and tls listener enabled is created."),
@Step(value = "Create Kafka topic and user.", expected = "Kafka topic and tls user are created."),
@Step(value = "Configure and deploy Kafka clients.", expected = "Kafka clients producer and consumer with tls are deployed."),
@@ -417,7 +417,7 @@ void testSendMessagesTlsScramSha() {
@TestDoc(
description = @Desc("Test custom listener configured with scram SHA authentication and TLS."),
steps = {
- @Step(value = "Create a Kafka cluster with broker and controller node pools.", expected = "Kafka cluster is created with node pools."),
+ @Step(value = "Create a Kafka cluster with broker and controller KafkaNodePools.", expected = "Kafka cluster is created with KafkaNodePools."),
@Step(value = "Create a Kafka cluster with custom listener using TLS and SCRAM-SHA authentication.", expected = "Kafka cluster with custom listener is ready."),
@Step(value = "Create a Kafka
topic and SCRAM-SHA user.", expected = "Kafka topic and user are created."), @Step(value = "Transmit messages over TLS using SCRAM-SHA authentication.", expected = "Messages are transmitted successfully.") @@ -672,7 +672,7 @@ void testOverrideNodePortConfiguration() { @TestDoc( description = @Desc("Test the NodePort TLS functionality for Kafka brokers in a Kubernetes environment."), steps = { - @Step(value = "Create Kafka broker and controller node pools.", expected = "Broker and controller node pools are created"), + @Step(value = "Create Kafka broker and controller KafkaNodePools.", expected = "Broker and controller KafkaNodePools are created"), @Step(value = "Deploy Kafka cluster with NodePort listener and TLS enabled", expected = "Kafka cluster is deployed with NodePort listener and TLS"), @Step(value = "Create a Kafka topic", expected = "Kafka topic is created"), @Step(value = "Create a Kafka user with TLS authentication", expected = "Kafka user with TLS authentication is created"), @@ -791,7 +791,7 @@ void testLoadBalancer() { @TestDoc( description = @Desc("Test validating the TLS connection through a Kafka LoadBalancer."), steps = { - @Step(value = "Create and configure KafkaNodePools", expected = "Node pools for brokers and controllers are created"), + @Step(value = "Create and configure KafkaNodePools", expected = "KafkaNodePools for brokers and controllers are created"), @Step(value = "Create and configure Kafka cluster with TLS listener", expected = "Kafka cluster with TLS enabled LoadBalancer listener is created"), @Step(value = "Create and configure Kafka user with TLS authentication", expected = "Kafka user with TLS authentication is created"), @Step(value = "Wait for the LoadBalancer address to be reachable", expected = "LoadBalancer address becomes reachable"), @@ -1165,7 +1165,7 @@ void testCustomChainCertificatesForNodePort() { description = @Desc("Test verifying custom solo certificates for load balancer in a Kafka cluster."), steps = { @Step(value = "Create custom secret", expected = "Custom secret is created with the specified certificate and key"), - @Step(value = "Create Kafka resources with node pools", expected = "Kafka brokers and controller pools are created and configured"), + @Step(value = "Create Kafka resources with KafkaNodePools", expected = "Kafka brokers and controller pools are created and configured"), @Step(value = "Create Kafka cluster with listeners", expected = "Kafka cluster is created with internal and load balancer listeners using the custom certificates"), @Step(value = "Create TLS user", expected = "TLS user is created"), @Step(value = "Verify produced and consumed messages via external client", expected = "Messages are successfully produced and consumed using the custom certificates"), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java index 3c8084c63e6..6a128ce45d0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java @@ -55,7 +55,7 @@ @Tag(REGRESSION) @SuiteDoc( - description = @Desc("Test to verify the functionality of using multiple NodePorts in a Kafka cluster within the same namespace."), + description = @Desc("Test to verify the functionality of using multiple NodePort listeners in a Kafka cluster within the same namespace."), labels = { @Label(value = 
TestDocsLabels.KAFKA)
}
@@ -72,7 +72,7 @@ public class MultipleListenersST extends AbstractST {
@Tag(EXTERNAL_CLIENTS_USED)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@TestDoc(
- description = @Desc("Test verifying the functionality of using multiple NodePorts in a Kafka cluster within the same namespace."),
+ description = @Desc("Test verifying the functionality of using multiple NodePort listeners in a Kafka cluster within the same namespace."),
steps = {
@Step(value = "Execute listener tests with NodePort configuration.", expected = "Listener tests run without issues using NodePort.")
},
@@ -89,7 +89,7 @@ void testMultipleNodePorts() {
@TestDoc(
description = @Desc("Test to verify the usage of more than one Kafka cluster within a single namespace."),
steps = {
- @Step(value = "Run the internal Kafka listeners test.", expected = "Listeners test runs successfully on the specified cluster.")
+ @Step(value = "Run the Internal Kafka listeners test.", expected = "Listeners test runs successfully on the specified cluster.")
},
labels = {
@Label(value = TestDocsLabels.KAFKA)
@@ -105,10 +105,10 @@ void testMultipleInternal() {
@Tag(EXTERNAL_CLIENTS_USED)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@TestDoc(
- description = @Desc("Test verifying the combination of internal and external Kafka listeners."),
+ description = @Desc("Test verifying the combination of Internal and External Kafka listeners."),
steps = {
@Step(value = "Check if the environment supports cluster-wide NodePort rights.", expected = "Test is skipped if the environment is not suitable."),
- @Step(value = "Retrieve and combine internal and NodePort listeners.", expected = "Listeners are successfully retrieved and combined."),
+ @Step(value = "Retrieve and combine Internal and NodePort listeners.", expected = "Listeners are successfully retrieved and combined."),
@Step(value = "Run listeners test with combined listeners.", expected = "Listeners test is executed successfully.")
},
labels = {
@@ -136,10 +136,10 @@ void testCombinationOfInternalAndExternalListeners() {
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@Tag(EXTERNAL_CLIENTS_USED)
@TestDoc(
- description = @Desc("Test verifying the behavior of multiple load balancers in a single namespace using more than one Kafka cluster."),
+ description = @Desc("Test verifying the behavior of multiple LoadBalancers in a single namespace using more than one Kafka cluster."),
steps = {
- @Step(value = "Run listeners test with LOADBALANCER type.", expected = "Listeners test executes successfully with load balancers."),
- @Step(value = "Validate the results.", expected = "Results match the expected outcomes for multiple load balancers.")
+ @Step(value = "Run listeners test with LoadBalancer type.", expected = "Listeners test executes successfully with LoadBalancers."),
+ @Step(value = "Validate the results.", expected = "Results match the expected outcomes for multiple LoadBalancers.")
},
labels = {
@Label(value = TestDocsLabels.KAFKA)
@@ -157,7 +157,7 @@ void testMultipleLoadBalancers() {
@TestDoc(
description = @Desc("Test to verify the functionality of multiple Kafka route listeners in a single namespace."),
steps = {
- @Step(value = "Retrieve test cases for Kafka Listener Type ROUTE.", expected = "Test cases for ROUTE are retrieved."),
+ @Step(value = "Retrieve test cases for Kafka Listener Type Route.", expected = "Test cases for Route are retrieved."),
@Step(value = "Run listener tests using the retrieved test cases and cluster name.", expected = "Listener tests run successfully with no errors.")
},
labels = {
@@ -174,11 +174,11 @@ void testMultipleRoutes() {
@Tag(EXTERNAL_CLIENTS_USED)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@TestDoc(
- description = @Desc("Test ensuring that different types of external Kafka listeners (ROUTE and NODEPORT) work correctly when mixed."),
+ description = @Desc("Test ensuring that different types of External Kafka listeners (Route and NodePort) work correctly when mixed."),
steps = {
@Step(value = "Retrieve route listeners.", expected = "Route listeners are retrieved from test cases."),
- @Step(value = "Retrieve nodeport listeners.", expected = "Nodeport listeners are retrieved from test cases."),
- @Step(value = "Combine route and nodeport listeners.", expected = "Multiple different listeners list is populated."),
+ @Step(value = "Retrieve NodePort listeners.", expected = "NodePort listeners are retrieved from test cases."),
+ @Step(value = "Combine route and NodePort listeners.", expected = "Multiple different listeners list is populated."),
@Step(value = "Run listeners test.", expected = "Listeners test runs using the combined list.")
},
labels = {
@@ -205,9 +205,9 @@ void testMixtureOfExternalListeners() {
@Tag(EXTERNAL_CLIENTS_USED)
@IsolatedTest("Using more tha one Kafka cluster in one namespace")
@TestDoc(
- description = @Desc("Verifies the combination of every kind of Kafka listener: INTERNAL, NODEPORT, ROUTE, and LOADBALANCER."),
+ description = @Desc("Verifies the combination of every kind of Kafka listener: Internal, NodePort, Route, and LoadBalancer."),
steps = {
- @Step(value = "Retrieve different types of Kafka listeners.", expected = "Lists of INTERNAL, NODEPORT, ROUTE, and LOADBALANCER listeners are retrieved."),
+ @Step(value = "Retrieve different types of Kafka listeners.", expected = "Lists of Internal, NodePort, Route, and LoadBalancer listeners are retrieved."),
@Step(value = "Combine all different listener lists.", expected = "A combined list of all Kafka listener types is created."),
@Step(value = "Run listeners test with combined listener list.", expected = "Listeners test runs with all types of Kafka listeners in the combined list.")
},
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java b/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java
index acebf64aeff..68b0c7c8ae2 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java
@@ -692,11 +692,11 @@ void testDynamicallySetClusterOperatorLoggingLevels() {
* 1. - Deploy Kafka cluster and KafkaConnect cluster, the latter with Log level Off.
* 2. - Deploy all additional resources, scraper Pod and network policies.
* 3. - Verify that no logs are present in KafkaConnect Pods.
- * 4. - Set inline log level to Debug in KafkaConnect custom resource.
+ * 4. - Set inline log level to Debug in KafkaConnect CustomResource.
* - log4j.properties file for given cluster has log level Debug, and pods provide logs on respective level.
- * 5. - Change inline log level from Debug to Info in KafkaConnect custom resource.
+ * 5. - Change inline log level from Debug to Info in KafkaConnect CustomResource.
* - log4j.properties file for given cluster has log level Info, and pods provide logs on respective level.
* 6.
- Create ConfigMap with necessary data for external logging and modify KafkaConnect custom resource to use external logging setting log level Off. + * 6. - Create ConfigMap with necessary data for external logging and modify KafkaConnect CustomResource to use external logging setting log level Off. * - log4j.properties file for given cluster has log level Off, and pods provide no more logs. * 7. - Disable the use of connector resources via annotations and verify KafkaConnect pod rolls. * - Verify that KafkaConnect deployment rolls @@ -1578,7 +1578,7 @@ void testLoggingHierarchy() { * @description This test case check that changing Logging configuration from internal to external triggers Rolling Update. * * @steps - * 1. - Deploy Kafka Cluster, without any logging related configuration + * 1. - Deploy Kafka cluster, without any logging related configuration * - Cluster is deployed * 2. - Modify Kafka by changing specification of logging to new external value * - Change in logging specification triggers Rolling Update diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java index c7b993c05b8..12e204fbb47 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java @@ -100,7 +100,7 @@ public class MultipleClusterOperatorsST extends AbstractST { * - Kafka is not deployed. * 5. - Verify state of metric 'strimzi_resource' for Kafka, which holds number of operated Kafka operands. * - Metric 'strimzi_resource' exposed by Cluster Operators is not present indicating no Kafka resource is being operated. - * 6. - Modify Kafka custom resource, by changing its label 'app.kubernetes.io/operator' to point to the first Cluster Operator + * 6. - Modify Kafka CustomResource, by changing its label 'app.kubernetes.io/operator' to point to the first Cluster Operator * - Kafka is deployed and operated by the first Cluster Operator. * 7. - Deploy Kafka Connect (with label 'app.kubernetes.io/operator' pointing the first Cluster Operator) and Kafka Connector * - Both operands are successfully deployed, and managed by the first Cluster Operator. @@ -231,7 +231,7 @@ void testMultipleCOsInDifferentNamespaces() { * 3. - Deploy Kafka Cluster with 3 Kafka replicas and label 'app.kubernetes.io/operator' pointing to the first Cluster Operator. * 4. - Change Kafka's label selector 'app.kubernetes.io/operator' to point to not existing Cluster Operator. * - Kafka Cluster is no longer controlled by any Cluster Operator. - * 5. - Modify Kafka custom resource, by increasing number of replicas from 3 to 4. + * 5. - Modify Kafka CustomResource, by increasing number of replicas from 3 to 4. * - Kafka is not scaled to 4 replicas. * 6. - Deploy Kafka Rebalance without 'app.kubernetes.io/operator' label. * - For a stable period of time, Kafka Rebalance is ignored as well. 
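For readers unfamiliar with how the operator-selection steps in the MultipleClusterOperatorsST Javadoc above are driven, the hand-over amounts to re-labelling the Kafka CustomResource. The sketch below is illustrative only and is not part of this patch: it reuses the `KafkaResource.replaceKafkaResourceInSpecificNamespace` helper that appears in the hunks above, while the class name, the `FIRST_CO_NAME` value, and the assumed import package are hypothetical.

```java
// Illustrative sketch only (not part of this patch): hand a Kafka cluster over to a
// specific Cluster Operator by setting the 'app.kubernetes.io/operator' label that the
// MultipleClusterOperatorsST Javadoc above describes.
import io.strimzi.systemtest.resources.crd.KafkaResource; // assumed package of the helper used in the tests above

public class OperatorHandOverSketch {

    // Label key quoted in the Javadoc above; the operator name below is a hypothetical example.
    private static final String OPERATOR_LABEL_KEY = "app.kubernetes.io/operator";
    private static final String FIRST_CO_NAME = "first-cluster-operator";

    static void handOverToFirstOperator(String namespaceName, String clusterName) {
        // Re-label the Kafka CustomResource so that only the chosen Cluster Operator reconciles it.
        KafkaResource.replaceKafkaResourceInSpecificNamespace(namespaceName, clusterName,
            kafka -> kafka.getMetadata().getLabels().put(OPERATOR_LABEL_KEY, FIRST_CO_NAME));
    }
}
```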
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java index 433ba2b4a01..3426cccb092 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java @@ -411,18 +411,18 @@ void testKafkaTopicChangingMinInSyncReplicas() { } /** - * @description This test case checks that Kafka cluster will not act upon KafkaTopic Custom Resources - * which are not of its concern, i.e., KafkaTopic Custom Resources are not labeled accordingly. + * @description This test case checks that Kafka cluster will not act upon KafkaTopic CustomResources + * which are not of its concern, i.e., KafkaTopic CustomResources are not labeled accordingly. * * @steps * 1. - Deploy Kafka with short reconciliation time configured on Topic Operator * - Kafka is deployed - * 2. - Create KafkaTopic Custom Resource without any labels provided - * - KafkaTopic Custom resource is created + * 2. - Create KafkaTopic CustomResource without any labels provided + * - KafkaTopic CustomResource is created * 3. - Verify that KafkaTopic specified by created KafkaTopic is not created * - Given KafkaTopic is not present inside Kafka cluster - * 4. - Delete given KafkaTopic Custom Resource - * - KafkaTopic Custom Resource is deleted + * 4. - Delete given KafkaTopic CustomResource + * - KafkaTopic CustomResource is deleted * * @testcase * - topic-operator diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java index b95cdacd0e7..bf8b0d0c77a 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java @@ -391,18 +391,18 @@ void testKafkaPodPendingDueToRack() { /** * @description This test case verifies the rolling update behavior of Kafka controller nodes under specific conditions. - * It focuses on ensuring that changes in Kafka configuration and node pool properties affect only the intended KafkaNodePools, + * It focuses on ensuring that changes in Kafka configuration and KafkaNodePool properties affect only the intended KafkaNodePools, * particularly the controller nodes, while leaving others like broker nodes unaffected. * * @steps * 1. - Assume that KRaft mode is enabled. - * 2. - Create and deploy a Kafka node pool with broker role (brokerPool) and another with controller role (controllerPool), each with 3 replicas. + * 2. - Create and deploy a KafkaNodePool with broker role (brokerPool) and another with controller role (controllerPool), each with 3 replicas. * 3. - Take snapshots of the broker and controller pods for later comparison. * 4. - Update a specific Kafka configuration that affects only controller nodes and verify the rolling update behavior. * - Ensure that only controller nodes undergo a rolling update, while broker nodes remain unaffected. * 5. - Update a specific Kafka configuration that affects only broker nodes and verify the rolling update behavior. * - Ensure that only broker nodes undergo a rolling update, while controller node remain unaffected. - * 6. - Introduce a change in the controller node pool, such as modifying pod affinity. + * 6. - Introduce a change in the controller KafkaNodePool, such as modifying pod affinity. 
* - Observe and ensure that this change triggers another rolling update for the controller nodes. * 7. - Verify the rolling updates of controller nodes by comparing the snapshots taken before and after each configuration change. * @@ -454,7 +454,7 @@ void testKafkaRollingUpdatesOfSingleRoleNodePools() { // 2nd Rolling update triggered by PodAffinity - // Modify pod affinity settings for the controller node pool + // Modify pod affinity settings for the controller KafkaNodePool // Pod Affinity is expecting a running pod on a node with topologyKey with labels specify by LabelSelector PodAffinity podAffinity = new PodAffinityBuilder() .addNewRequiredDuringSchedulingIgnoredDuringExecution() @@ -481,7 +481,7 @@ void testKafkaRollingUpdatesOfSingleRoleNodePools() { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getControllerSelector(), controllerPoolReplicas, controllerPoolPodsSnapshot); - // Verify that broker nodes do not roll due to the controller node pool affinity change + // Verify that broker nodes do not roll due to the controller KafkaNodePool affinity change RollingUpdateUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), testStorage.getBrokerSelector(), brokerPoolPodsSnapshot); } @@ -492,7 +492,7 @@ void testKafkaRollingUpdatesOfSingleRoleNodePools() { * * @steps * 1. - Ensure that the environment is running in KRaft mode. - * 2. - Create and deploy a Kafka node pool with mixed roles (controller and broker), consisting of 6 replicas. + * 2. - Create and deploy a KafkaNodePool with mixed roles (controller and broker), consisting of 6 replicas. * 3. - Take a snapshot of the mixed-role pods for comparison before and after the configuration change. * 4. - Update a specific Kafka configuration targeting controller roles. * 5. - Observe and verify that all mixed-role nodes undergo a rolling update in response to the configuration change. 
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java index 8e3dcaf81c3..19cc481862d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java @@ -169,7 +169,7 @@ void testRecoveryDuringZookeeperBasedRollingUpdate() { // Kafka recovery // change kafka to unreasonable CPU request causing trigger of Rolling update and recover by second modification - // if kafka node pool is enabled change specification directly in KNP CR as changing it in kafka would have no impact in case it is already specified in KNP + // if KafkaNodePools are enabled change specification directly in KNP CR as changing it in Kafka would have no impact in case it is already specified in KNP if (Environment.isKafkaNodePoolsEnabled()) { KafkaNodePoolResource.replaceKafkaNodePoolResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), knp -> { knp.getSpec() @@ -195,7 +195,7 @@ void testRecoveryDuringZookeeperBasedRollingUpdate() { ClientUtils.waitForInstantConsumerClientSuccess(testStorage); LOGGER.info("Recover Kafka {}/{} from pending state by modifying its resource request to realistic value", testStorage.getClusterName(), testStorage.getNamespaceName()); - // if kafka node pool is enabled change specification directly in KNP CR as changing it in kafka would have no impact in case it is already specified in KNP + // if KafkaNodePools are enabled change specification directly in KNP CR as changing it in Kafka would have no impact in case it is already specified in KNP if (Environment.isKafkaNodePoolsEnabled()) { KafkaNodePoolResource.replaceKafkaNodePoolResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), knp -> { knp.getSpec() @@ -870,10 +870,10 @@ void testMetricsChange() throws JsonProcessingException { } /** - * Modifies a Kafka node pool to have an unreasonable CPU request, triggering a rolling update, + * Modifies a KafkaNodePool to have an unreasonable CPU request, triggering a rolling update, * and then recovers it to a normal state. CPU request is firstly increased, causing single pod * to enter a pending state. Afterward wait for the pod to stabilize before reducing the CPU - * request back to a reasonable amount, allowing the node pool to recover. + * request back to a reasonable amount, allowing the KafkaNodePool to recover. 
*/ private static void modifyNodePoolToUnscheduledAndRecover(final String controllerPoolName, final LabelSelector controllerPoolSelector, final TestStorage testStorage) { // change knp to unreasonable CPU request causing trigger of Rolling update diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java index 47c3bad8fc7..9b8449e3a13 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java @@ -337,7 +337,7 @@ void testReplaceCustomClusterCACertificateValidityToInvokeRenewalProcess() { } // Pause Kafka reconciliation - LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); + LOGGER.info("Pause the reconciliation of the Kafka CustomResource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); KafkaUtils.annotateKafka(testStorage.getNamespaceName(), testStorage.getClusterName(), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")); // To test trigger of renewal of CA with short validity dates, both new dates need to be set @@ -352,7 +352,7 @@ void testReplaceCustomClusterCACertificateValidityToInvokeRenewalProcess() { KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), k -> k.getSpec().setClusterCa(newClusterCA)); // Resume Kafka reconciliation - LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); + LOGGER.info("Resume the reconciliation of the Kafka CustomResource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); KafkaUtils.removeAnnotation(testStorage.getNamespaceName(), testStorage.getClusterName(), Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION); // On the next reconciliation, the Cluster Operator performs a `rolling update`: @@ -444,7 +444,7 @@ void testReplaceCustomClientsCACertificateValidityToInvokeRenewalProcess() { final Date initialKafkaUserCertEndTime = userCert.getNotAfter(); // Pause Kafka reconciliation - LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); + LOGGER.info("Pause the reconciliation of the Kafka CustomResource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); KafkaUtils.annotateKafka(testStorage.getNamespaceName(), testStorage.getClusterName(), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")); LOGGER.info("Change of Kafka validity and renewal days - reconciliation should start"); @@ -456,7 +456,7 @@ void testReplaceCustomClientsCACertificateValidityToInvokeRenewalProcess() { KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), k -> k.getSpec().setClientsCa(newClientsCA)); // Resume Kafka reconciliation - LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); + LOGGER.info("Resume the reconciliation of the Kafka CustomResource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); 
KafkaUtils.removeAnnotation(testStorage.getNamespaceName(), testStorage.getClusterName(), Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION); // Wait for reconciliation and verify certs have been updated @@ -499,7 +499,7 @@ void testReplaceCustomClientsCACertificateValidityToInvokeRenewalProcess() { */ private void manuallyRenewCa(TestStorage testStorage, SystemTestCertHolder oldCa, SystemTestCertHolder newCa) { // Pause Kafka reconciliation - LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); + LOGGER.info("Pause the reconciliation of the Kafka CustomResource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); KafkaUtils.annotateKafka(testStorage.getNamespaceName(), testStorage.getClusterName(), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")); String certSecretName = ""; @@ -536,7 +536,7 @@ private void manuallyRenewCa(TestStorage testStorage, SystemTestCertHolder oldCa SystemTestCertHolder.increaseCertGenerationCounterInSecret(caKeySecret, testStorage, Ca.ANNO_STRIMZI_IO_CA_KEY_GENERATION); // Resume Kafka reconciliation - LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); + LOGGER.info("Resume the reconciliation of the Kafka CustomResource ({})", StrimziPodSetResource.getBrokerComponentName(testStorage.getClusterName())); KafkaUtils.removeAnnotation(testStorage.getNamespaceName(), testStorage.getClusterName(), Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/SpecificST.java b/systemtest/src/test/java/io/strimzi/systemtest/specific/SpecificST.java index 09f310e822c..f701a606c35 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/SpecificST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/SpecificST.java @@ -95,7 +95,7 @@ void testClusterWideOperatorWithLimitedAccessToSpecificNamespaceViaRbacRole() { clusterOperator.unInstall(); - // create namespace, where we will be able to deploy Custom Resources + // create namespace, where we will be able to deploy CustomResources NamespaceManager.getInstance().createNamespaceAndPrepare(namespaceWhereCreationOfCustomResourcesIsApproved, CollectorElement.createCollectorElement(ResourceManager.getTestContext().getRequiredTestClass().getName(), ResourceManager.getTestContext().getRequiredTestMethod().getName())); @@ -120,7 +120,7 @@ void testClusterWideOperatorWithLimitedAccessToSpecificNamespaceViaRbacRole() { resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(Environment.TEST_SUITE_NAMESPACE, testStorage.getClusterName(), 3).build()); // implicit verification that a user is able to deploy Kafka cluster in namespace , where we are allowed - // to create Custom Resources because of `*-namespaced Role` + // to create CustomResources because of `*-namespaced Role` resourceManager.createResourceWithoutWait( NodePoolsConverter.convertNodePoolsIfNeeded( KafkaNodePoolTemplates.brokerPool(namespaceWhereCreationOfCustomResourcesIsApproved, testStorage.getBrokerPoolName(), testStorage.getClusterName(), 3).build(), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java index 49b9f592a2b..cc924266173 100644 --- 
a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java @@ -305,7 +305,7 @@ protected void modifyApplyClusterOperatorWithCRDsFromFile(String clusterOperator final List watchedNsRoleBindingFilePrefixes = List.of( "020-RoleBinding", // rb to role for creating KNative resources - "023-RoleBinding", // rb to role for watching Strimzi custom resources + "023-RoleBinding", // rb to role for watching Strimzi CustomResources "031-RoleBinding" // rb to role for entity operator ); @@ -339,7 +339,7 @@ protected void deleteInstalledYamls(String clusterOperatorNamespaceName, String if (root != null) { final List watchedNsRoleBindingFilePrefixes = List.of( "020-RoleBinding", // rb to role for creating KNative resources - "023-RoleBinding", // rb to role for watching Strimzi custom resources + "023-RoleBinding", // rb to role for watching Strimzi CustomResources "031-RoleBinding" // rb to role for entity operator ); @@ -731,7 +731,7 @@ protected void cleanUpKafkaTopics(String componentsNamespaceName) { cmdKubeClient(componentsNamespaceName).deleteAllByResource(KafkaTopic.RESOURCE_KIND); KafkaTopicUtils.waitForTopicWithPrefixDeletion(componentsNamespaceName, topicName); } else { - LOGGER.info("Kafka Topic Custom Resource Definition does not exist, no KafkaTopic is being deleted"); + LOGGER.info("Kafka Topic CustomResource Definition does not exist, no KafkaTopic is being deleted"); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java b/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java index ee851768adb..171263c2942 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java @@ -65,7 +65,7 @@ public abstract class AbstractNamespaceST extends AbstractST { // namespace for all resources in this test except for KafkaTopics and KafkaUsers watched by Primary Kafka Cluser. static final String MAIN_TEST_NAMESPACE = "main-test-namespace"; - // namespace watched by Primary Kafka Cluster's TO and UO for any KafkaTopic and KafkaUser CRs. + // namespace watched by Primary Kafka cluster's TO and UO for any KafkaTopic and KafkaUser CRs. static final String PRIMARY_KAFKA_WATCHED_NAMESPACE = "primary-kafka-watched-namespace"; // name of kafka cluster which is to be created before any of these tests @@ -99,7 +99,7 @@ final void testDeployKafkaWithOperandsInNamespaceDifferentFromCO() { * from one where Cluster Operator resides correctly. * * @steps - * 1. - KafkaBridge custom resource is deployed in namespace watched by Cluster Operator. + * 1. - KafkaBridge CustomResource is deployed in namespace watched by Cluster Operator. * - KafkaBridge is transitioned into ready state. * * @usecase @@ -120,13 +120,13 @@ final void testKafkaBridgeInDifferentNamespaceFromCO(ExtensionContext extensionC /** * @description This test case verifies that Topic Operator configured to watch other namespace than the one it is deployed in still watches and acts upon - * custom resources correctly. + * CustomResources correctly. * * @steps - * 1. - As part of setup Kafka Cluster is deployed in main namespace, with Topic Operator configured to watch other namespace. + * 1. - As part of setup Kafka cluster is deployed in main namespace, with Topic Operator configured to watch other namespace. * - Kafka and its components are deployed and ready. - * 2. 
- KafkaTopic custom resource is created in namespace watched by Topic Operator. - * - Topic Operator acts upon KafkaTopic custom resource located in watched namespace and creates corresponding KafkaTopic in given Kafka Cluster. + * 2. - KafkaTopic CustomResource is created in namespace watched by Topic Operator. + * - Topic Operator acts upon KafkaTopic CustomResource located in watched namespace and creates corresponding KafkaTopic in given Kafka cluster. * * @usecase * - namespaces @@ -149,16 +149,16 @@ final void testTopicOperatorWatchingOtherNamespace(ExtensionContext extensionCon } /** - * @description This test case verifies that KafkaUser custom resource managed by is act upon by User Operator from correctly, despite being watched + * @description This test case verifies that KafkaUser CustomResource is correctly acted upon by the User Operator, despite being watched * from different namespace. * * @steps - * 1. - As part of setup Kafka Cluster is deployed in main namespace, with the User Operator configured to watch other namespace. + * 1. - As part of setup Kafka cluster is deployed in main namespace, with the User Operator configured to watch other namespace. * - Kafka and its components are deployed and ready. - * 2. - KafkaUser custom resource is created in namespace watched by Topic Operator. - * - Topic Operator acts upon KafkaUser custom resource which is transitioned into ready state while also creating all other resources (e.g., Secret). - * 3. - Credentials generated due to this KafkaUser custom resources are used in order to allow clients to communicate with Kafka Cluster. - * - Clients are able to successfully communicate with the Kafka Cluster. + * 2. - KafkaUser CustomResource is created in namespace watched by User Operator. + * - User Operator acts upon KafkaUser CustomResource which is transitioned into ready state while also creating all other resources (e.g., Secret). + * 3. - Credentials generated due to this KafkaUser CustomResource are used in order to allow clients to communicate with Kafka cluster. + * - Clients are able to successfully communicate with the Kafka cluster. * * @usecase * - namespaces @@ -198,15 +198,15 @@ final void testUserInNamespaceDifferentFromUserOperator(ExtensionContext extensi } /** - * @description This test case verifies that KafkaMirrorMaker2 custom resource can be created correctly in different namespace than the one containing Cluster Operator. + * @description This test case verifies that KafkaMirrorMaker2 CustomResource can be created correctly in different namespace than the one containing Cluster Operator. * * @steps - * 1. - As part of setup source Kafka Cluster is deployed in main namespace, + * 1. - As part of setup source Kafka cluster is deployed in main namespace, * - Kafka and its components are deployed and ready. - * 2. - Second Kafka Cluster is deployed in the same namespace as the first one. - * - Second Kafka Cluster is deployed and in ready state. - * 3. - MirrorMaker2 Custom Resource is deployed in same main namespace, pointing as source and target Kafka Cluster 2 Kafka Clusters mentioned in previous step. - * - KafkaMirrorMaker2 custom resource is in ready state. + * 2. - Second Kafka cluster is deployed in the same namespace as the first one. + * - Second Kafka cluster is deployed and in ready state. + * 3. - MirrorMaker2 CustomResource is deployed in same main namespace, pointing as source and target to the 2 Kafka clusters mentioned in previous step. 
+ * - KafkaMirrorMaker2 CustomResource is in ready state. * * @usecase * - namespaces @@ -235,10 +235,10 @@ final void testDeployMirrorMaker2InNamespaceDifferentFromCO(ExtensionContext ext /** - * @description This test case verifies that KafkaConnect and KafkaConnector custom resource can be created correctly in different namespace than the one containing Cluster Operator. + * @description This test case verifies that KafkaConnect and KafkaConnector CustomResources can be created correctly in different namespace than the one containing Cluster Operator. * * @steps - * 1. - As part of setup source Kafka Cluster is deployed in main namespace, + * 1. - As part of setup source Kafka cluster is deployed in main namespace, * - Kafka and its components are deployed and ready. * 2. - KafkaConnect is deployed in another namespace than Cluster Operator. * - KafkaConnect cluster is successfully deployed. @@ -315,7 +315,7 @@ private void deployKafkaConnectorWithSink(ExtensionContext extensionContext, Str } /** - * Helper method which deploys Kafka Cluster and Scraper Pod in primary namespace. It is supposed to be called once there is a running Cluster Operator. + * Helper method which deploys Kafka cluster and Scraper Pod in primary namespace. It is supposed to be called once there is a running Cluster Operator. * */ final protected void deployAdditionalGenericResourcesForAbstractNamespaceST() { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/watcher/MultipleNamespaceST.java b/systemtest/src/test/java/io/strimzi/systemtest/watcher/MultipleNamespaceST.java index 3b35873d807..49dfd702099 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/watcher/MultipleNamespaceST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/watcher/MultipleNamespaceST.java @@ -42,7 +42,7 @@ private void deployTestSpecificClusterOperator() { void setupEnvironment() { deployTestSpecificClusterOperator(); - LOGGER.info("deploy all other resources (Kafka Cluster and Scrapper) for testing Namespaces"); + LOGGER.info("deploy all other resources (Kafka cluster and Scraper) for testing Namespaces"); deployAdditionalGenericResourcesForAbstractNamespaceST(); } } \ No newline at end of file From 1ef290cbe2ab485e07589987e9f9aae69b878652 Mon Sep 17 00:00:00 2001 From: see-quick Date: Fri, 4 Oct 2024 10:37:48 +0200 Subject: [PATCH 09/12] a few updates Signed-off-by: see-quick --- .../systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md | 2 +- .../io/strimzi/systemtest/cruisecontrol/CruiseControlST.java | 2 +- .../test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md index 194baf3fdd1..452534f638f 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy Cluster Operator with default installation. | Cluster Operator is deployed. | +| 1. | Deploy cluster operator with default installation. | Cluster operator is deployed. 
| **Labels:** diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index 1cc4e18a79b..55f0e3366fa 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -501,7 +501,7 @@ void testCruiseControlIntraBrokerBalancing() { } @IsolatedTest - @MixedRoleNotSupported("Scaling aKafkaNodePool with mixed roles is not supported yet") + @MixedRoleNotSupported("Scaling a KafkaNodePool with mixed roles is not supported yet") @TestDoc( description = @Desc("Testing the behavior of Cruise Control during both scaling up and down of Kafka brokers using KafkaNodePools."), steps = { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java index 18dd9ec8763..806c64e37e0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java @@ -39,7 +39,7 @@ @SuiteDoc( description = @Desc("Verifies the basic functionality for each supported Kafka version."), beforeTestSteps = { - @Step(value = "Deploy Cluster Operator with default installation.", expected = "Cluster Operator is deployed.") + @Step(value = "Deploy cluster operator with default installation.", expected = "Cluster operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) From 9f1d183905ece7ded4d7047fc4d0e0f90bd1754a Mon Sep 17 00:00:00 2001 From: see-quick Date: Sat, 5 Oct 2024 16:18:10 +0200 Subject: [PATCH 10/12] update Lukas review Signed-off-by: see-quick --- .../io.strimzi.systemtest.kafka.KafkaNodePoolST.md | 4 ++-- .../io.strimzi.systemtest.kafka.listeners.ListenersST.md | 2 +- .../java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java | 4 ++-- .../src/test/java/io/strimzi/systemtest/kafka/KafkaST.java | 6 +++--- .../io/strimzi/systemtest/kafka/listeners/ListenersST.java | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md index c26f758c3d3..b99f76a5394 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -93,10 +93,10 @@ | 3. | Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully. | | 4. | Remove KafkaTopic. | KafkaTopic is cleaned as expected. | | 5. | Annotate one of KafkaNodePools to perform manual rolling update. | rolling update started. | -| 6. | Change role ofKafkaNodePool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update. | +| 6. | Change role of KafkaNodePool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update. | | 7. | Original rolling update finishes successfully. | rolling update is completed. | | 8. | Delete previously created KafkaTopic. | KafkaTopic is deleted and KafkaNodePool role change is initiated. | -| 9. | Change role ofKafkaNodePool from controller only to mixed role. | KafkaNodePool changes role to mixed role. | +| 9. 
| Change role of KafkaNodePool from controller only to mixed role. | KafkaNodePool changes role to mixed role. | | 10. | Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present. | Messages are produced and consumed successfully. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md index fb996159144..b40737ace8c 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md @@ -322,7 +322,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Create instances for broker pool and controller pool using NodePoolsConverter and KafkaNodePoolTemplates | Resources are created and ready for use | +| 1. | Create Kafka broker and controller KafkaNodePools. | Broker and controller KafkaNodePools are created | | 2. | Create Kafka cluster with ephemeral storage and load balancer listener | Kafka cluster is created with the specified configuration | | 3. | Wait until the load balancer address is reachable | Address is reachable | | 4. | Configure external Kafka client and send messages | Messages are sent successfully | diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 4b1ae3e6b48..e00b44f9c73 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -181,10 +181,10 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() { @Step(value = "Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully."), @Step(value = "Remove KafkaTopic.", expected = "KafkaTopic is cleaned as expected."), @Step(value = "Annotate one of KafkaNodePools to perform manual rolling update.", expected = "rolling update started."), - @Step(value = "Change role ofKafkaNodePool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update."), + @Step(value = "Change role of KafkaNodePool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update."), @Step(value = "Original rolling update finishes successfully.", expected = "rolling update is completed."), @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and KafkaNodePool role change is initiated."), - @Step(value = "Change role ofKafkaNodePool from controller only to mixed role.", expected = "KafkaNodePool changes role to mixed role."), + @Step(value = "Change role of KafkaNodePool from controller only to mixed role.", expected = "KafkaNodePool changes role to mixed role."), @Step(value = "Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present.", expected = "Messages are produced and consumed successfully.") }, labels = { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index de676697ca1..cef5c8c32d6 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ 
b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -1083,9 +1083,9 @@ void testDeployUnsupportedKafka() { ); resourceManager.createResourceWithoutWait(KafkaTemplates.kafkaEphemeral(testStorage.getNamespaceName(), testStorage.getClusterName(), 1, 1) .editSpec() - .editKafka() - .withVersion(nonExistingVersion) - .endKafka() + .editKafka() + .withVersion(nonExistingVersion) + .endKafka() .endSpec() .build() ); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index 9a468c0c0d2..84faadf8c83 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -731,7 +731,7 @@ void testNodePortTls() { @TestDoc( description = @Desc("Test verifying load balancer functionality with external clients."), steps = { - @Step(value = "Create instances for broker pool and controller pool using NodePoolsConverter and KafkaNodePoolTemplates", expected = "Resources are created and ready for use"), + @Step(value = "Create Kafka broker and controller KafkaNodePools.", expected = "Broker and controller KafkaNodePools are created"), @Step(value = "Create Kafka cluster with ephemeral storage and load balancer listener", expected = "Kafka cluster is created with the specified configuration"), @Step(value = "Wait until the load balancer address is reachable", expected = "Address is reachable"), @Step(value = "Configure external Kafka client and send messages", expected = "Messages are sent successfully"), From 92ca96a606b0edec461b6e3ef4be9a4479a2edce Mon Sep 17 00:00:00 2001 From: see-quick Date: Mon, 7 Oct 2024 09:27:38 +0200 Subject: [PATCH 11/12] move CO to capitals Signed-off-by: see-quick --- ...o.strimzi.systemtest.cruisecontrol.CruiseControlApiST.md | 2 +- ...systemtest.cruisecontrol.CruiseControlConfigurationST.md | 2 +- .../io.strimzi.systemtest.cruisecontrol.CruiseControlST.md | 2 +- .../io.strimzi.systemtest.kafka.ConfigProviderST.md | 2 +- .../systemtests/io.strimzi.systemtest.kafka.KafkaST.md | 6 +++--- .../systemtests/io.strimzi.systemtest.kafka.QuotasST.md | 2 +- ...emtest.kafka.dynamicconfiguration.DynamicConfSharedST.md | 2 +- .../io.strimzi.systemtest.kafka.listeners.ListenersST.md | 2 +- .../systemtest/cruisecontrol/CruiseControlApiST.java | 2 +- .../cruisecontrol/CruiseControlConfigurationST.java | 2 +- .../strimzi/systemtest/cruisecontrol/CruiseControlST.java | 2 +- .../java/io/strimzi/systemtest/kafka/ConfigProviderST.java | 2 +- .../src/test/java/io/strimzi/systemtest/kafka/KafkaST.java | 6 +++--- .../src/test/java/io/strimzi/systemtest/kafka/QuotasST.java | 2 +- .../kafka/dynamicconfiguration/DynamicConfSharedST.java | 2 +- .../io/strimzi/systemtest/kafka/listeners/ListenersST.java | 2 +- 16 files changed, 20 insertions(+), 20 deletions(-) diff --git a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlApiST.md b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlApiST.md index 5d2a41ce44f..0275a426813 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlApiST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlApiST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy the cluster operator | Cluster operator is deployed | +| 1. 
| Deploy the Cluster Operator | Cluster Operator is deployed | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md index e0e1b52444f..99480194e64 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlConfigurationST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Set up the cluster operator | Cluster operator is installed and running | +| 1. | Set up the Cluster Operator | Cluster Operator is installed and running | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md index e8b038783ca..904fa5f9ad9 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.cruisecontrol.CruiseControlST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy cluster operator with default installation | Cluster operator is deployed and running | +| 1. | Deploy Cluster Operator with default installation | Cluster Operator is deployed and running | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md index 6de134d07f9..409f3e322cc 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy cluster operator across all namespaces, with custom configuration. | Cluster operator is deployed. | +| 1. | Deploy Cluster Operator across all namespaces, with custom configuration. | Cluster Operator is deployed. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md index 00fd4168661..43fff6e2f83 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy cluster operator across all namespaces, with custom configuration. | Cluster operator is deployed. | +| 1. | Deploy Cluster Operator across all namespaces, with custom configuration. | Cluster Operator is deployed. | **Labels:** @@ -71,7 +71,7 @@ ## testKRaftMode -**Description:** This test case verifies basic working of Kafka Cluster managed by cluster operator with KRaft. +**Description:** This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft. **Steps:** @@ -96,7 +96,7 @@ | - | - | - | | 1. | Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of which are configured to delete their Persistent Volume Claims on Kafka cluster un-provision. | Kafka is deployed, volumes are labeled and linked to Pods correctly. | | 2. | Verify that labels in Persistent Volume Claims are set correctly. | Persistent Volume Claims contains expected labels and values. | -| 3. | Modify Kafka CustomResource, specifically 'deleteClaim' property of its first Kafka Volume. 
| Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by cluster operator. | +| 3. | Modify Kafka CustomResource, specifically 'deleteClaim' property of its first Kafka Volume. | Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator. | | 4. | Delete Kafka cluster. | Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'deleteClaim' property set to true. | | 5. | Verify remaining Persistent Volume Claims. | Persistent Volume Claim referenced by volume of formerly deleted Kafka CustomResource with property 'deleteClaim' set to true is still present. | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md index 1634c540467..647aa1548b9 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.QuotasST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Deploy default cluster operator with the required configurations. | Cluster operator is deployed. | +| 1. | Deploy default Cluster Operator with the required configurations. | Cluster Operator is deployed. | **Labels:** diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md index d6ee5ef7d7d..4ab4777628b 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.dynamicconfiguration.DynamicConfSharedST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Run cluster operator installation. | Cluster operator is installed. | +| 1. | Run Cluster Operator installation. | Cluster Operator is installed. | | 2. | Deploy shared Kafka across all test cases. | Shared Kafka is deployed. | | 3. | Deploy scraper pod. | Scraper pod is deployed. | diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md index b40737ace8c..5aefa531ade 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.listeners.ListenersST.md @@ -6,7 +6,7 @@ | Step | Action | Result | | - | - | - | -| 1. | Install the cluster operator with default settings. | Cluster operator is installed successfully. | +| 1. | Install the Cluster Operator with default settings. | Cluster Operator is installed successfully. 
| **Labels:** diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java index 6f4b1b1e353..7dc6697b0c3 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlApiST.java @@ -40,7 +40,7 @@ @SuiteDoc( description = @Desc("This test suite verifies that Cruise Control's basic API requests function correctly"), beforeTestSteps = { - @Step(value = "Deploy the cluster operator", expected = "Cluster operator is deployed") + @Step(value = "Deploy the Cluster Operator", expected = "Cluster Operator is deployed") }, labels = { @Label(value = TestDocsLabels.CRUISE_CONTROL), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java index 5e45a662e09..2e3a6f7e231 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java @@ -60,7 +60,7 @@ @SuiteDoc( description = @Desc("This test suite, verify configuration of the Cruise Control component."), beforeTestSteps = { - @Step(value = "Set up the cluster operator", expected = "Cluster operator is installed and running") + @Step(value = "Set up the Cluster Operator", expected = "Cluster Operator is installed and running") }, labels = { @Label(value = TestDocsLabels.CRUISE_CONTROL) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index 55f0e3366fa..849806459d8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -84,7 +84,7 @@ description = @Desc("This test suite validates the functionality and behavior of Cruise Control across multiple Kafka scenarios. 
" + "It ensures correct operation under various configurations and conditions."), beforeTestSteps = { - @Step(value = "Deploy cluster operator with default installation", expected = "Cluster operator is deployed and running") + @Step(value = "Deploy Cluster Operator with default installation", expected = "Cluster Operator is deployed and running") }, labels = { @Label(value = TestDocsLabels.CRUISE_CONTROL), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index d8ba47f2369..b316057d797 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -47,7 +47,7 @@ @SuiteDoc( description = @Desc("This test suite verifies Kafka Connect using ConfigMap and EnvVar configuration."), beforeTestSteps = { - @Step(value = "Deploy cluster operator across all namespaces, with custom configuration.", expected = "Cluster operator is deployed.") + @Step(value = "Deploy Cluster Operator across all namespaces, with custom configuration.", expected = "Cluster Operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index cef5c8c32d6..53441984213 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -115,7 +115,7 @@ @SuiteDoc( description = @Desc("Test suite containing Kafka related stuff (i.e., JVM resources, EO, TO or UO removal from Kafka cluster), which ensures proper functioning of Kafka clusters."), beforeTestSteps = { - @Step(value = "Deploy cluster operator across all namespaces, with custom configuration.", expected = "Cluster operator is deployed.") + @Step(value = "Deploy Cluster Operator across all namespaces, with custom configuration.", expected = "Cluster Operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) @@ -399,7 +399,7 @@ void testRemoveComponentsFromEntityOperator() { steps = { @Step(value = "Deploy Kafka with persistent storage and JBOD storage with 2 volumes, both of which are configured to delete their Persistent Volume Claims on Kafka cluster un-provision.", expected = "Kafka is deployed, volumes are labeled and linked to Pods correctly."), @Step(value = "Verify that labels in Persistent Volume Claims are set correctly.", expected = "Persistent Volume Claims contains expected labels and values."), - @Step(value = "Modify Kafka CustomResource, specifically 'deleteClaim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by cluster operator."), + @Step(value = "Modify Kafka CustomResource, specifically 'deleteClaim' property of its first Kafka Volume.", expected = "Kafka CR is successfully modified, annotation of according Persistent Volume Claim is changed afterwards by Cluster Operator."), @Step(value = "Delete Kafka cluster.", expected = "Kafka cluster and its components are deleted, including Persistent Volume Claim of Volume with 'deleteClaim' property set to true."), @Step(value = "Verify remaining Persistent Volume Claims.", expected = "Persistent Volume Claim referenced by volume of formerly deleted Kafka CustomResource with property 'deleteClaim' set to true is still 
present.") }, @@ -1223,7 +1223,7 @@ void testResizeJbodVolumes() { @ParallelNamespaceTest() @TestDoc( - description = @Desc("This test case verifies basic working of Kafka Cluster managed by cluster operator with KRaft."), + description = @Desc("This test case verifies basic working of Kafka Cluster managed by Cluster Operator with KRaft."), steps = { @Step(value = "Deploy Kafka annotated to enable KRaft (and additionally annotated to enable KafkaNodePool management), and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected."), @Step(value = "Produce and consume messages in given Kafka Cluster.", expected = "Clients can produce and consume messages."), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java index c8035a138f8..a0d19282aa2 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java @@ -46,7 +46,7 @@ @SuiteDoc( description = @Desc("NOTE: STs in this class will not work properly on `minikube` clusters (and maybe not on other clusters that use local storage), because the calculation of currently used storage is based on the local storage, which can be shared across multiple Docker containers. To properly run this suite, you should use cluster with proper storage."), beforeTestSteps = { - @Step(value = "Deploy default cluster operator with the required configurations.", expected = "Cluster operator is deployed.") + @Step(value = "Deploy default Cluster Operator with the required configurations.", expected = "Cluster Operator is deployed.") }, labels = { @Label(value = TestDocsLabels.KAFKA) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java index f1b6e0cad5e..06a0abb3888 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java @@ -51,7 +51,7 @@ @SuiteDoc( description = @Desc("DynamicConfigurationSharedST is responsible for verifying that changing dynamic Kafka configuration will not trigger a rolling update. 
Shared -> for each test case we use the same Kafka resource configuration."),
     beforeTestSteps = {
-        @Step(value = "Run cluster operator installation.", expected = "Cluster operator is installed."),
+        @Step(value = "Run Cluster Operator installation.", expected = "Cluster Operator is installed."),
         @Step(value = "Deploy shared Kafka across all test cases.", expected = "Shared Kafka is deployed."),
         @Step(value = "Deploy scraper pod.", expected = "Scraper pod is deployed.")
     },
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
index 84faadf8c83..7f5a44ce389 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java
@@ -100,7 +100,7 @@
 @SuiteDoc(
     description = @Desc("This class demonstrates various tests for Kafka listeners using different authentication mechanisms."),
     beforeTestSteps = {
-        @Step(value = "Install the cluster operator with default settings.", expected = "Cluster operator is installed successfully.")
+        @Step(value = "Install the Cluster Operator with default settings.", expected = "Cluster Operator is installed successfully.")
     },
     labels = {
         @Label(value = TestDocsLabels.KAFKA)

From 38aa0cc80ff6c0f0acf942d87f7d4305e88f4cff Mon Sep 17 00:00:00 2001
From: see-quick
Date: Wed, 9 Oct 2024 10:50:47 +0200
Subject: [PATCH 12/12] Jakub review

Signed-off-by: see-quick
---
 .../systemtests/io.strimzi.systemtest.connect.ConnectST.md | 3 +--
 .../io.strimzi.systemtest.kafka.ConfigProviderST.md        | 6 +++---
 .../io.strimzi.systemtest.kafka.KafkaNodePoolST.md         | 4 ++--
 .../io.strimzi.systemtest.kafka.KafkaVersionsST.md         | 2 +-
 .../test/java/io/strimzi/systemtest/connect/ConnectST.java | 3 +--
 .../java/io/strimzi/systemtest/kafka/ConfigProviderST.java | 6 +++---
 .../java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java  | 4 ++--
 .../java/io/strimzi/systemtest/kafka/KafkaVersionsST.java  | 2 +-
 8 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md
index 4d454853d4c..6eb9f120ea5 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.connect.ConnectST.md
@@ -114,8 +114,7 @@
 | 3. | Create and wait for resources. | Kafka resources, including KafkaNodePools and KafkaConnect instances, are created and become ready. |
 | 4. | Annotate for manual rolling update. | KafkaConnect components are annotated for a manual rolling update. |
 | 5. | Perform and wait for rolling update. | KafkaConnect components roll and new pods are deployed. |
-| 6. | Kafka Connect pod. | Pod configurations and annotations are verified. |
-| 7. | KafkaConnectors. | Various Kafka Connect resource labels and configurations are verified to ensure correct deployment. |
+| 6. | Verify KafkaConnect Pod. | Pod configurations and labels are verified. |
 
 **Labels:**
 
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md
index 409f3e322cc..9abcc134482 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.ConfigProviderST.md
@@ -1,6 +1,6 @@
 # ConfigProviderST
 
-**Description:** This test suite verifies Kafka Connect using ConfigMap and EnvVar configuration.
+**Description:** This test suite verifies KafkaConnect using ConfigMap and EnvVar configuration.
 
 **Before tests execution steps:**
 
@@ -25,9 +25,9 @@
 | 1. | Create broker and controller KafkaNodePools. | Resources are created and are in ready state. |
 | 2. | Create Kafka cluster. | Kafka cluster is ready |
 | 3. | Create ConfigMap for connector configuration. | ConfigMap with connector configuration is created. |
-| 4. | Deploy Kafka Connect with external configuration from ConfigMap. | Kafka Connect is deployed with proper configuration. |
+| 4. | Deploy Kafka Connect with external configuration from ConfigMap. | KafkaConnect is deployed with proper configuration. |
 | 5. | Create necessary Role and RoleBinding for connector. | Role and RoleBinding are created and applied. |
-| 6. | Deploy Kafka connector. | Kafka connector is successfully deployed. |
+| 6. | Deploy KafkaConnector. | KafkaConnector is successfully deployed. |
 | 7. | Deploy Kafka clients. | Kafka clients are deployed and ready. |
 | 8. | Send messages and verify they are written to sink file. | Messages are successfully written to the specified sink file. |
 
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md
index b99f76a5394..d4e8cba9a36 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md
@@ -92,9 +92,9 @@
 | 2. | Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present. | KafkaTopic created. |
 | 3. | Deploy clients and transmit messages and remove KafkaTopic. | Transition of messages is finished successfully. |
 | 4. | Remove KafkaTopic. | KafkaTopic is cleaned as expected. |
-| 5. | Annotate one of KafkaNodePools to perform manual rolling update. | rolling update started. |
+| 5. | Annotate one of KafkaNodePools to perform manual rolling update. | Rolling update started. |
 | 6. | Change role of KafkaNodePool from mixed to controller only role. | Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update. |
-| 7. | Original rolling update finishes successfully. | rolling update is completed. |
+| 7. | Original rolling update finishes successfully. | Rolling update is completed. |
 | 8. | Delete previously created KafkaTopic. | KafkaTopic is deleted and KafkaNodePool role change is initiated. |
 | 9. | Change role of KafkaNodePool from controller only to mixed role. | KafkaNodePool changes role to mixed role. |
 | 10. | Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present. | Messages are produced and consumed successfully. |
diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md
index 452534f638f..194baf3fdd1 100644
--- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md
+++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaVersionsST.md
@@ -6,7 +6,7 @@
 | Step | Action | Result |
 | - | - | - |
-| 1. | Deploy cluster operator with default installation. | Cluster operator is deployed. |
+| 1. | Deploy Cluster Operator with default installation. | Cluster Operator is deployed. |
 
 **Labels:**
 
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java
index 8e514649707..ab4d3a32439 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java
@@ -136,8 +136,7 @@ class ConnectST extends AbstractST {
             @Step(value = "Create and wait for resources.", expected = "Kafka resources, including KafkaNodePools and KafkaConnect instances, are created and become ready."),
             @Step(value = "Annotate for manual rolling update.", expected = "KafkaConnect components are annotated for a manual rolling update."),
             @Step(value = "Perform and wait for rolling update.", expected = "KafkaConnect components roll and new pods are deployed."),
-            @Step(value = "Kafka Connect pod.", expected = "Pod configurations and annotations are verified."),
-            @Step(value = "KafkaConnectors.", expected = "Various Kafka Connect resource labels and configurations are verified to ensure correct deployment.")
+            @Step(value = "Verify KafkaConnect Pod.", expected = "Pod configurations and labels are verified."),
         },
         labels = {
             @Label(value = TestDocsLabels.CONNECT)
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java
index b316057d797..76725d063b6 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java
@@ -45,7 +45,7 @@
 @Tag(REGRESSION)
 @SuiteDoc(
-    description = @Desc("This test suite verifies Kafka Connect using ConfigMap and EnvVar configuration."),
+    description = @Desc("This test suite verifies KafkaConnect using ConfigMap and EnvVar configuration."),
     beforeTestSteps = {
         @Step(value = "Deploy Cluster Operator across all namespaces, with custom configuration.", expected = "Cluster Operator is deployed.")
     },
@@ -64,9 +64,9 @@ public class ConfigProviderST extends AbstractST {
             @Step(value = "Create broker and controller KafkaNodePools.", expected = "Resources are created and are in ready state."),
             @Step(value = "Create Kafka cluster.", expected = "Kafka cluster is ready"),
             @Step(value = "Create ConfigMap for connector configuration.", expected = "ConfigMap with connector configuration is created."),
-            @Step(value = "Deploy Kafka Connect with external configuration from ConfigMap.", expected = "Kafka Connect is deployed with proper configuration."),
+            @Step(value = "Deploy Kafka Connect with external configuration from ConfigMap.", expected = "KafkaConnect is deployed with proper configuration."),
             @Step(value = "Create necessary Role and RoleBinding for connector.", expected = "Role and RoleBinding are created and applied."),
-            @Step(value = "Deploy Kafka connector.", expected = "Kafka connector is successfully deployed."),
+            @Step(value = "Deploy KafkaConnector.", expected = "KafkaConnector is successfully deployed."),
             @Step(value = "Deploy Kafka clients.", expected = "Kafka clients are deployed and ready."),
             @Step(value = "Send messages and verify they are written to sink file.", expected = "Messages are successfully written to the specified sink file.")
         },
         labels = {
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java
index e00b44f9c73..0ae4f7151bc 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java
@@ -180,9 +180,9 @@ void testKafkaNodePoolBrokerIdsManagementUsingAnnotations() {
             @Step(value = "Create KafkaTopic with replica number requiring all the remaining Kafka Brokers to be present.", expected = "KafkaTopic created."),
             @Step(value = "Deploy clients and transmit messages and remove KafkaTopic.", expected = "Transition of messages is finished successfully."),
             @Step(value = "Remove KafkaTopic.", expected = "KafkaTopic is cleaned as expected."),
-            @Step(value = "Annotate one of KafkaNodePools to perform manual rolling update.", expected = "rolling update started."),
+            @Step(value = "Annotate one of KafkaNodePools to perform manual rolling update.", expected = "Rolling update started."),
             @Step(value = "Change role of KafkaNodePool from mixed to controller only role.", expected = "Role Change is prevented due to existing KafkaTopic replicas and ongoing rolling update."),
-            @Step(value = "Original rolling update finishes successfully.", expected = "rolling update is completed."),
+            @Step(value = "Original rolling update finishes successfully.", expected = "Rolling update is completed."),
             @Step(value = "Delete previously created KafkaTopic.", expected = "KafkaTopic is deleted and KafkaNodePool role change is initiated."),
             @Step(value = "Change role of KafkaNodePool from controller only to mixed role.", expected = "KafkaNodePool changes role to mixed role."),
             @Step(value = "Produce and consume messages on newly created KafkaTopic with replica count requiring also new brokers to be present.", expected = "Messages are produced and consumed successfully.")
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java
index 806c64e37e0..18dd9ec8763 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java
@@ -39,7 +39,7 @@
 @SuiteDoc(
     description = @Desc("Verifies the basic functionality for each supported Kafka version."),
     beforeTestSteps = {
-        @Step(value = "Deploy cluster operator with default installation.", expected = "Cluster operator is deployed.")
+        @Step(value = "Deploy Cluster Operator with default installation.", expected = "Cluster Operator is deployed.")
    },
     labels = {
         @Label(value = TestDocsLabels.KAFKA)