diff --git a/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java b/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java
index b0a9e601906..fdfa3f55fe5 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/listeners/ExecutionListener.java
@@ -67,7 +67,9 @@ public static boolean requiresSharedNamespace(final ExtensionContext extensionCo
             TestConstants.DYNAMIC_CONFIGURATION, // Dynamic configuration also because in DynamicConfSharedST we use @TestFactory
             TestConstants.TRACING, // Tracing, because we deploy Jaeger operator inside additional namespace
             TestConstants.KAFKA_SMOKE, // KafkaVersionsST, MigrationST because here we use @ParameterizedTest
-            TestConstants.MIGRATION
+            TestConstants.MIGRATION,
+            TestConstants.UPGRADE,
+            TestConstants.KRAFT_UPGRADE
         );
 
         for (TestIdentifier testIdentifier : testCases) {
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/resources/ResourceManager.java b/systemtest/src/main/java/io/strimzi/systemtest/resources/ResourceManager.java
index a90b64f3403..58c64aabfe7 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/resources/ResourceManager.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/resources/ResourceManager.java
@@ -517,13 +517,13 @@ public static void setCoDeploymentName(String newName) {
         coDeploymentName = newName;
     }
 
-    public static void waitForResourceReadiness(String resourceType, String resourceName) {
+    public static void waitForResourceReadiness(String namespaceName, String resourceType, String resourceName) {
         LOGGER.info("Waiting for " + resourceType + "/" + resourceName + " readiness");
 
         TestUtils.waitFor("readiness of resource " + resourceType + "/" + resourceName,
             TestConstants.GLOBAL_POLL_INTERVAL, TestConstants.GLOBAL_CMD_CLIENT_TIMEOUT,
-            () -> ResourceManager.cmdKubeClient().getResourceReadiness(resourceType, resourceName));
-        LOGGER.info("Resource " + resourceType + "/" + resourceName + " is ready");
+            () -> ResourceManager.cmdKubeClient().namespace(namespaceName).getResourceReadiness(resourceType, resourceName));
+        LOGGER.info("Resource " + resourceType + "/" + resourceName + " in namespace:" + namespaceName + " is ready");
     }
 
     public static <T extends CustomResource<? extends Spec, ? extends Status>> boolean waitForResourceStatusMessage(MixedOperation<T, ?, ?> operation, T resource, String message) {
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/resources/operator/SetupClusterOperator.java b/systemtest/src/main/java/io/strimzi/systemtest/resources/operator/SetupClusterOperator.java
index db9d8c1c9b7..a816cfef3af 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/resources/operator/SetupClusterOperator.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/resources/operator/SetupClusterOperator.java
@@ -362,8 +362,8 @@ private void olmInstallation() {
      * Upgrade cluster operator by updating subscription and obtaining new install plan,
      * which has not been used yet and also approves the installation
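+     * @param namespaceName Namespace in which the Cluster Operator is deployed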
      */
-    public void upgradeClusterOperator(OlmConfiguration olmConfiguration) {
-        if (kubeClient().listPodsByPrefixInName(ResourceManager.getCoDeploymentName()).isEmpty()) {
+    public void upgradeClusterOperator(String namespaceName, OlmConfiguration olmConfiguration) {
+        if (kubeClient(namespaceName).listPodsByPrefixInName(ResourceManager.getCoDeploymentName()).isEmpty()) {
             throw new RuntimeException("We can not perform upgrade! Cluster Operator Pod is not present.");
         }
 
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java
index eb0ed1ec2a6..2a603994e86 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java
@@ -114,6 +114,16 @@ public static void waitForInstantConsumerClientSuccess(TestStorage testStorage)
         waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), testStorage.getMessageCount());
     }
 
+    /**
+     * Waits for the instant producer client to succeed in an explicitly specified namespace, automatically deleting the associated job afterward.
+     *
+     * @param namespaceName Explicit namespace name.
+     * @param testStorage   The {@link TestStorage} instance containing details about the client's name.
+     */
+    public static void waitForInstantProducerClientSuccess(String namespaceName, TestStorage testStorage) {
+        waitForClientSuccess(testStorage.getProducerName(), namespaceName, testStorage.getMessageCount());
+    }
+
     /**
      * Waits for the instant producer client to succeed, automatically deleting the associated job afterward.
      * {@link TestStorage#getProducerName()} is used for identifying producer Job and
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java
index 6429f28f3a1..525094b31b9 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java
@@ -324,11 +324,10 @@ public static void waitUntilLogFromPodContainsString(String namespaceName, Strin
      * Change Deployment configuration before applying it. We set different namespace, log level and image pull policy.
      * It's mostly used for use cases where we use direct kubectl command instead of fabric8 calls to api.
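+     * @param namespaceName Namespace where Strimzi should be installed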
      * @param deploymentFile loaded Strimzi deployment file
-     * @param namespace Namespace where Strimzi should be installed
      * @param strimziFeatureGatesValue feature gates value
      * @return deployment file content as String
      */
-    public static String changeDeploymentConfiguration(File deploymentFile, String namespace, final String strimziFeatureGatesValue) {
+    public static String changeDeploymentConfiguration(String namespaceName, File deploymentFile, final String strimziFeatureGatesValue) {
         YAMLMapper mapper = new YAMLMapper();
         try {
             JsonNode node = mapper.readTree(deploymentFile);
@@ -339,7 +338,7 @@ public static String changeDeploymentConfiguration(File deploymentFile, String n
                 if (varName.matches("STRIMZI_NAMESPACE")) {
                     // Replace all the default images with ones from the $DOCKER_ORG org and with the $DOCKER_TAG tag
                     ((ObjectNode) envVar).remove("valueFrom");
-                    ((ObjectNode) envVar).put("value", namespace);
+                    ((ObjectNode) envVar).put("value", namespaceName);
                 }
 
                 if (varName.matches("STRIMZI_LOG_LEVEL")) {
diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java
index 7c99cdf8316..085e27c170c 100644
--- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java
+++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java
@@ -116,10 +116,10 @@ public static void waitUntilKafkaStatusConditionContainsMessage(String clusterNa
         });
     }
 
-    public static void waitUntilStatusKafkaVersionMatchesExpectedVersion(String clusterName, String namespace, String expectedKafkaVersion) {
+    public static void waitUntilStatusKafkaVersionMatchesExpectedVersion(String namespaceName, String clusterName, String expectedKafkaVersion) {
         TestUtils.waitFor("Kafka version '" + expectedKafkaVersion + "' in Kafka cluster '" + clusterName + "' to match", TestConstants.GLOBAL_POLL_INTERVAL, TestConstants.GLOBAL_STATUS_TIMEOUT,
             () -> {
-                String kafkaVersionInStatus = KafkaResource.kafkaClient().inNamespace(namespace).withName(clusterName).get().getStatus().getKafkaVersion();
+                String kafkaVersionInStatus = KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getKafkaVersion();
                 return expectedKafkaVersion.equals(kafkaVersionInStatus);
             });
     }
 
@@ -409,9 +409,9 @@ public static String generateRandomNameOfKafka(String clusterName) {
         return clusterName + "-" + RANDOM.nextInt(Integer.MAX_VALUE);
     }
 
-    public static String getVersionFromKafkaPodLibs(String kafkaPodName) {
+    public static String getVersionFromKafkaPodLibs(String namespaceName, String kafkaPodName) {
         String command = "ls libs | grep -Po 'kafka_\\d+.\\d+-\\K(\\d+.\\d+.\\d+)(?=.*jar)' | head -1 | cut -d \"-\" -f2";
-        return cmdKubeClient().execInPodContainer(
+        return cmdKubeClient(namespaceName).execInPodContainer(
             kafkaPodName,
             "kafka",
             "/bin/bash",
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java
index 490781e9fb3..c34cb2dcd31 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java
@@ -63,7 +63,6 @@
 import java.util.Random;
 import java.util.stream.IntStream;
 
-import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE;
 import static io.strimzi.systemtest.TestConstants.DEFAULT_SINK_FILE_PATH;
 import static io.strimzi.systemtest.TestConstants.PATH_TO_KAFKA_TOPIC_CONFIG;
 import static io.strimzi.systemtest.TestConstants.PATH_TO_PACKAGING;
@@ -92,7 +91,6 @@ public class AbstractUpgradeST extends AbstractST {
     protected Map<String, String> controllerPods;
     protected Map<String, String> brokerPods;
     protected Map<String, String> eoPods;
-    protected Map<String, String> coPods;
     protected Map<String, String> connectPods;
 
     protected final LabelSelector brokerSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName));
@@ -103,7 +101,7 @@ public class AbstractUpgradeST extends AbstractST {
     protected final String topicName = "my-topic";
     protected final String userName = "my-user";
-    protected final int upgradeTopicCount = 40;
+    protected final int upgradeTopicCount = 20;
     protected final int btoKafkaTopicsOnlyCount = 3;
     // ExpectedTopicCount contains additionally consumer-offset topic, my-topic and continuous-topic
@@ -127,22 +125,21 @@ protected int getExpectedTopicCount(boolean isUTOUsed, boolean wasUTOUsedBefore)
         return upgradeTopicCount + btoKafkaTopicsOnlyCount;
     }
 
-    protected void makeSnapshots() {
-        coPods = DeploymentUtils.depSnapshot(TestConstants.CO_NAMESPACE, ResourceManager.getCoDeploymentName());
-        controllerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, controllerSelector);
-        brokerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector);
-        eoPods = DeploymentUtils.depSnapshot(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName));
-        connectPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, connectLabelSelector);
+    protected void makeComponentsSnapshots(String componentsNamespaceName) {
+        controllerPods = PodUtils.podSnapshot(componentsNamespaceName, controllerSelector);
+        brokerPods = PodUtils.podSnapshot(componentsNamespaceName, brokerSelector);
+        eoPods = DeploymentUtils.depSnapshot(componentsNamespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
+        connectPods = PodUtils.podSnapshot(componentsNamespaceName, connectLabelSelector);
     }
 
     @SuppressWarnings("CyclomaticComplexity")
-    protected void changeKafkaAndLogFormatVersion(CommonVersionModificationData versionModificationData) throws IOException {
+    protected void changeKafkaAndLogFormatVersion(String componentsNamespaceName, CommonVersionModificationData versionModificationData) throws IOException {
         // Get Kafka configurations
-        String currentLogMessageFormat = cmdKubeClient().getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.config.log\\.message\\.format\\.version");
-        String currentInterBrokerProtocol = cmdKubeClient().getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.config.inter\\.broker\\.protocol\\.version");
+        String currentLogMessageFormat = cmdKubeClient(componentsNamespaceName).getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.config.log\\.message\\.format\\.version");
+        String currentInterBrokerProtocol = cmdKubeClient(componentsNamespaceName).getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.config.inter\\.broker\\.protocol\\.version");
 
         // Get Kafka version
-        String kafkaVersionFromCR = cmdKubeClient().getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.version");
+        String kafkaVersionFromCR = cmdKubeClient(componentsNamespaceName).getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.version");
         kafkaVersionFromCR = kafkaVersionFromCR.equals("") ? null : kafkaVersionFromCR;
 
         String kafkaVersionFromProcedure = versionModificationData.getProcedures().getVersion();
 
@@ -161,15 +158,15 @@ protected void changeKafkaAndLogFormatVersion(CommonVersionModificationData vers
             LOGGER.info("Deploying Kafka from: {}", kafkaYaml.getPath());
             // Change kafka version of it's empty (null is for remove the version)
             String defaultValueForVersions = kafkaVersionFromCR == null ? null : TestKafkaVersion.getSpecificVersion(kafkaVersionFromCR).messageVersion();
-            cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaConfiguration(kafkaYaml, kafkaVersionFromCR, defaultValueForVersions, defaultValueForVersions));
+            cmdKubeClient(componentsNamespaceName).applyContent(KafkaUtils.changeOrRemoveKafkaConfiguration(kafkaYaml, kafkaVersionFromCR, defaultValueForVersions, defaultValueForVersions));
 
             kafkaUserYaml = new File(examplesPath + "/examples/user/kafka-user.yaml");
             LOGGER.info("Deploying KafkaUser from: {}", kafkaUserYaml.getPath());
-            cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization"));
+            cmdKubeClient(componentsNamespaceName).applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization"));
 
             kafkaTopicYaml = new File(examplesPath + "/examples/topic/kafka-topic.yaml");
             LOGGER.info("Deploying KafkaTopic from: {}", kafkaTopicYaml.getPath());
-            cmdKubeClient().applyContent(TestUtils.readFile(kafkaTopicYaml));
+            cmdKubeClient(componentsNamespaceName).applyContent(TestUtils.readFile(kafkaTopicYaml));
 
         // #######################################################################
@@ -177,9 +174,9 @@ protected void changeKafkaAndLogFormatVersion(CommonVersionModificationData vers
         if (kafkaVersionFromProcedure != null && !kafkaVersionFromProcedure.isEmpty() && !kafkaVersionFromCR.contains(kafkaVersionFromProcedure) && ResourceManager.getTestContext().getTestClass().get().getSimpleName().toLowerCase(Locale.ROOT).contains("upgrade")) {
             LOGGER.info("Set Kafka version to " + kafkaVersionFromProcedure);
-            cmdKubeClient().patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/version", kafkaVersionFromProcedure);
+            cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/version", kafkaVersionFromProcedure);
             LOGGER.info("Waiting for Kafka rolling update to finish");
-            brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods);
+            brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(componentsNamespaceName, brokerSelector, 3, brokerPods);
         }
 
         String logMessageVersion = versionModificationData.getProcedures().getLogMessageVersion();
@@ -188,39 +185,43 @@
         if (logMessageVersion != null && !logMessageVersion.isEmpty() || interBrokerProtocolVersion != null && !interBrokerProtocolVersion.isEmpty()) {
             if (!logMessageVersion.isEmpty()) {
                 LOGGER.info("Set log message format version to {} (current version is {})", logMessageVersion, currentLogMessageFormat);
-                cmdKubeClient().patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/config/log.message.format.version", logMessageVersion);
+                cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/config/log.message.format.version", logMessageVersion);
             }
 
             if (!interBrokerProtocolVersion.isEmpty()) {
                 LOGGER.info("Set inter-broker protocol version to {} (current version is {})", interBrokerProtocolVersion, currentInterBrokerProtocol);
                 LOGGER.info("Set inter-broker protocol version to " + interBrokerProtocolVersion);
-                cmdKubeClient().patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/config/inter.broker.protocol.version", interBrokerProtocolVersion);
+                cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/config/inter.broker.protocol.version", interBrokerProtocolVersion);
             }
 
             if ((currentInterBrokerProtocol != null && !currentInterBrokerProtocol.equals(interBrokerProtocolVersion)) || (currentLogMessageFormat != null && !currentLogMessageFormat.isEmpty() && !currentLogMessageFormat.equals(logMessageVersion))) {
                 LOGGER.info("Waiting for Kafka rolling update to finish");
-                brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods);
+                brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(componentsNamespaceName, brokerSelector, 3, brokerPods);
             }
-            makeSnapshots();
+            makeComponentsSnapshots(componentsNamespaceName);
         }
 
         if (kafkaVersionFromProcedure != null && !kafkaVersionFromProcedure.isEmpty() && !kafkaVersionFromCR.contains(kafkaVersionFromProcedure) && ResourceManager.getTestContext().getTestClass().get().getSimpleName().toLowerCase(Locale.ROOT).contains("downgrade")) {
             LOGGER.info("Set Kafka version to " + kafkaVersionFromProcedure);
-            cmdKubeClient().patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/version", kafkaVersionFromProcedure);
+            cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/version", kafkaVersionFromProcedure);
             LOGGER.info("Waiting for Kafka rolling update to finish");
-            brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, brokerPods);
+            brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(componentsNamespaceName, brokerSelector, brokerPods);
             }
         }
     }
 
-    protected void logPodImages(String namespaceName) {
-        logPodImages(namespaceName, controllerSelector, brokerSelector, eoSelector, coSelector);
+    protected void logComponentsPodImagesWithConnect(String componentsNamespaceName) {
+        logPodImages(componentsNamespaceName, controllerSelector, brokerSelector, eoSelector, connectLabelSelector);
     }
 
-    protected void logPodImagesWithConnect(String namespaceName) {
-        logPodImages(namespaceName, controllerSelector, brokerSelector, eoSelector, connectLabelSelector, coSelector);
+    protected void logComponentsPodImages(String componentsNamespaceName) {
+        logPodImages(componentsNamespaceName, controllerSelector, brokerSelector, eoSelector);
+    }
+
+    protected void logClusterOperatorPodImage(String clusterOperatorNamespaceName) {
+        logPodImages(clusterOperatorNamespaceName, coSelector);
     }
 
     protected void logPodImages(String namespaceName, LabelSelector... labelSelectors) {
@@ -235,26 +236,28 @@ protected void logPodImages(String namespaceName, LabelSelector... labelSelector
         ));
     }
 
-    protected void waitForKafkaClusterRollingUpdate() {
+    protected void waitForKafkaClusterRollingUpdate(final String componentsNamespaceName) {
         LOGGER.info("Waiting for ZK StrimziPodSet roll");
-        controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerPods);
+        controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(componentsNamespaceName, controllerSelector, 3, controllerPods);
         LOGGER.info("Waiting for Kafka StrimziPodSet roll");
-        brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods);
+        brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(componentsNamespaceName, brokerSelector, 3, brokerPods);
         LOGGER.info("Waiting for EO Deployment roll");
         // Check the TO and UO also got upgraded
-        eoPods = DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
+        eoPods = DeploymentUtils.waitTillDepHasRolled(componentsNamespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
     }
 
-    protected void waitForReadinessOfKafkaCluster() {
+    protected void waitForReadinessOfKafkaCluster(final String componentsNamespaceName) {
         LOGGER.info("Waiting for ZooKeeper StrimziPodSet");
-        RollingUpdateUtils.waitForComponentAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3);
+        RollingUpdateUtils.waitForComponentAndPodsReady(componentsNamespaceName, controllerSelector, 3);
         LOGGER.info("Waiting for Kafka StrimziPodSet");
-        RollingUpdateUtils.waitForComponentAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3);
+        RollingUpdateUtils.waitForComponentAndPodsReady(componentsNamespaceName, brokerSelector, 3);
         LOGGER.info("Waiting for EO Deployment");
-        DeploymentUtils.waitForDeploymentAndPodsReady(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
+        DeploymentUtils.waitForDeploymentAndPodsReady(componentsNamespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
     }
 
-    protected void changeClusterOperator(BundleVersionModificationData versionModificationData, String namespace) throws IOException {
+    protected void changeClusterOperator(String clusterOperatorNamespaceName, String componentsNamespaceName, BundleVersionModificationData versionModificationData) throws IOException {
+        final Map<String, String> coPods = DeploymentUtils.depSnapshot(clusterOperatorNamespaceName, ResourceManager.getCoDeploymentName());
+
         File coDir;
         // Modify + apply installation files
         LOGGER.info("Update CO from {} to {}", versionModificationData.getFromVersion(), versionModificationData.getToVersion());
@@ -266,47 +269,73 @@
             coDir = new File(dir, versionModificationData.getToExamples() + "/install/cluster-operator/");
         }
 
-        copyModifyApply(coDir, namespace, versionModificationData.getFeatureGatesAfter());
+        modifyApplyClusterOperatorWithCRDsFromFile(clusterOperatorNamespaceName, componentsNamespaceName, coDir, versionModificationData.getFeatureGatesAfter());
 
         LOGGER.info("Waiting for CO upgrade");
-        DeploymentUtils.waitTillDepHasRolled(namespace, ResourceManager.getCoDeploymentName(), 1, coPods);
+        DeploymentUtils.waitTillDepHasRolled(clusterOperatorNamespaceName, ResourceManager.getCoDeploymentName(), 1, coPods);
     }
 
-    protected void copyModifyApply(File root, String namespace, final String strimziFeatureGatesValue) {
-        KubeClusterResource.getInstance().setNamespace(namespace);
+    /**
+     * Series of steps done when applying operator from files located in root directory. Operator deployment is modified
+     * to watch a given (single) namespace. All role based access control resources are modified so the subject is found
+     * in operator namespace. Role bindings concerning operands are modified to be deployed in watched namespace.
+     *
+     * @param clusterOperatorNamespaceName the name of the namespace where the Strimzi operator is deployed.
+     * @param componentsNamespaceName the name of the single namespace being watched and managed by the Strimzi operator.
+     * @param root the root directory containing the YAML files to be processed.
+     * @param strimziFeatureGatesValue the value of the Strimzi feature gates to be injected into deployment configurations.
+     */
+    protected void modifyApplyClusterOperatorWithCRDsFromFile(String clusterOperatorNamespaceName, String componentsNamespaceName, File root, final String strimziFeatureGatesValue) {
+        KubeClusterResource.getInstance().setNamespace(clusterOperatorNamespaceName);
+
+        final List<String> watchedNsRoleBindingFilePrefixes = List.of(
+            "020-RoleBinding",  // rb to role for creating KNative resources
+            "023-RoleBinding",  // rb to role for watching Strimzi custom resources
+            "031-RoleBinding"   // rb to role for entity operator
+        );
 
         Arrays.stream(Objects.requireNonNull(root.listFiles())).sorted().forEach(f -> {
-            if (f.getName().matches(".*RoleBinding.*")) {
-                cmdKubeClient().replaceContent(TestUtils.changeRoleBindingSubject(f, namespace));
+            if (watchedNsRoleBindingFilePrefixes.stream().anyMatch((rbFilePrefix) -> f.getName().startsWith(rbFilePrefix))) {
+                cmdKubeClient(componentsNamespaceName).replaceContent(TestUtils.changeRoleBindingSubject(f, clusterOperatorNamespaceName));
+            } else if (f.getName().matches(".*RoleBinding.*")) {
+                cmdKubeClient(clusterOperatorNamespaceName).replaceContent(TestUtils.changeRoleBindingSubject(f, clusterOperatorNamespaceName));
             } else if (f.getName().matches(".*Deployment.*")) {
-                cmdKubeClient().replaceContent(StUtils.changeDeploymentConfiguration(f, namespace, strimziFeatureGatesValue));
+                cmdKubeClient(clusterOperatorNamespaceName).replaceContent(StUtils.changeDeploymentConfiguration(componentsNamespaceName, f, strimziFeatureGatesValue));
             } else {
-                cmdKubeClient().replaceContent(TestUtils.getContent(f, TestUtils::toYamlString));
+                cmdKubeClient(clusterOperatorNamespaceName).replaceContent(TestUtils.getContent(f, TestUtils::toYamlString));
             }
         });
     }
 
-    protected void deleteInstalledYamls(File root, String namespace) {
+    protected void deleteInstalledYamls(String clusterOperatorNamespaceName, String componentsNamespaceName, File root) {
         if (kafkaUserYaml != null) {
             LOGGER.info("Deleting KafkaUser configuration files");
-            cmdKubeClient().delete(kafkaUserYaml);
+            cmdKubeClient(componentsNamespaceName).delete(kafkaUserYaml);
         }
 
         if (kafkaTopicYaml != null) {
            LOGGER.info("Deleting KafkaTopic configuration files");
-            KafkaTopicUtils.setFinalizersInAllTopicsToNull(namespace);
+            KafkaTopicUtils.setFinalizersInAllTopicsToNull(componentsNamespaceName);
             cmdKubeClient().delete(kafkaTopicYaml);
         }
 
         if (kafkaYaml != null) {
             LOGGER.info("Deleting Kafka configuration files");
-            cmdKubeClient().delete(kafkaYaml);
+            cmdKubeClient(componentsNamespaceName).delete(kafkaYaml);
         }
 
         if (root != null) {
+            final List<String> watchedNsRoleBindingFilePrefixes = List.of(
+                "020-RoleBinding",  // rb to role for creating KNative resources
+                "023-RoleBinding",  // rb to role for watching Strimzi custom resources
+                "031-RoleBinding"   // rb to role for entity operator
+            );
+
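+            // Same RoleBinding prefix list as in modifyApplyClusterOperatorWithCRDsFromFile, so deletion targets the same namespaces as creation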
rb to role for watching Strimzi custom resources + "031-RoleBinding" // rb to role for entity operator + ); + Arrays.stream(Objects.requireNonNull(root.listFiles())).sorted().forEach(f -> { try { - if (f.getName().matches(".*RoleBinding.*")) { - cmdKubeClient().deleteContent(TestUtils.changeRoleBindingSubject(f, namespace)); + if (watchedNsRoleBindingFilePrefixes.stream().anyMatch((rbFilePrefix) -> f.getName().startsWith(rbFilePrefix))) { + cmdKubeClient(componentsNamespaceName).deleteContent(TestUtils.changeRoleBindingSubject(f, clusterOperatorNamespaceName)); + } else if (f.getName().matches(".*RoleBinding.*")) { + cmdKubeClient(clusterOperatorNamespaceName).deleteContent(TestUtils.changeRoleBindingSubject(f, clusterOperatorNamespaceName)); } else { - cmdKubeClient().delete(f); + cmdKubeClient(clusterOperatorNamespaceName).delete(f); } } catch (Exception ex) { LOGGER.warn("Failed to delete resources: {}", f.getName()); @@ -315,23 +344,23 @@ protected void deleteInstalledYamls(File root, String namespace) { } } - protected void checkAllImages(BundleVersionModificationData versionModificationData, String namespaceName) { + protected void checkAllComponentsImages(String componentsNamespaceName, BundleVersionModificationData versionModificationData) { if (versionModificationData.getImagesAfterOperations().isEmpty()) { fail("There are no expected images"); } - checkContainerImages(controllerSelector, versionModificationData.getZookeeperImage()); - checkContainerImages(brokerSelector, versionModificationData.getKafkaImage()); - checkContainerImages(eoSelector, versionModificationData.getTopicOperatorImage()); - checkContainerImages(eoSelector, 1, versionModificationData.getUserOperatorImage()); + checkContainerImages(componentsNamespaceName, controllerSelector, versionModificationData.getZookeeperImage()); + checkContainerImages(componentsNamespaceName, brokerSelector, versionModificationData.getKafkaImage()); + checkContainerImages(componentsNamespaceName, eoSelector, versionModificationData.getTopicOperatorImage()); + checkContainerImages(componentsNamespaceName, eoSelector, 1, versionModificationData.getUserOperatorImage()); } - protected void checkContainerImages(LabelSelector labelSelector, String image) { - checkContainerImages(labelSelector, 0, image); + protected void checkContainerImages(String namespaceName, LabelSelector labelSelector, String image) { + checkContainerImages(namespaceName, labelSelector, 0, image); } - protected void checkContainerImages(LabelSelector labelSelector, int container, String image) { - List pods1 = kubeClient().listPods(labelSelector); + protected void checkContainerImages(String namespaceName, LabelSelector labelSelector, int container, String image) { + List pods1 = kubeClient(namespaceName).listPods(labelSelector); for (Pod pod : pods1) { if (!image.equals(pod.getSpec().getContainers().get(container).getImage())) { LOGGER.debug("Expected image for Pod: {}/{}: {} \nCurrent image: {}", pod.getMetadata().getNamespace(), pod.getMetadata().getName(), image, pod.getSpec().getContainers().get(container).getImage()); @@ -340,14 +369,13 @@ protected void checkContainerImages(LabelSelector labelSelector, int container, } } - protected void setupEnvAndUpgradeClusterOperator(BundleVersionModificationData upgradeData, TestStorage testStorage, UpgradeKafkaVersion upgradeKafkaVersion, String namespace) throws IOException { + protected void setupEnvAndUpgradeClusterOperator(String clusterOperatorNamespaceName, TestStorage testStorage, BundleVersionModificationData 
upgradeData, UpgradeKafkaVersion upgradeKafkaVersion) throws IOException { LOGGER.info("Test upgrade of Cluster Operator from version: {} to version: {}", upgradeData.getFromVersion(), upgradeData.getToVersion()); - cluster.setNamespace(namespace); - this.deployCoWithWaitForReadiness(upgradeData, namespace); - this.deployKafkaClusterWithWaitForReadiness(upgradeData, upgradeKafkaVersion); - this.deployKafkaUserWithWaitForReadiness(upgradeData, namespace); - this.deployKafkaTopicWithWaitForReadiness(upgradeData); + this.deployCoWithWaitForReadiness(clusterOperatorNamespaceName, testStorage.getNamespaceName(), upgradeData); + this.deployKafkaClusterWithWaitForReadiness(testStorage.getNamespaceName(), upgradeData, upgradeKafkaVersion); + this.deployKafkaUserWithWaitForReadiness(testStorage.getNamespaceName(), upgradeData); + this.deployKafkaTopicWithWaitForReadiness(testStorage.getNamespaceName(), upgradeData); // Create bunch of topics for upgrade if it's specified in configuration if (upgradeData.getAdditionalTopics() != null && upgradeData.getAdditionalTopics() > 0) { @@ -362,7 +390,7 @@ protected void setupEnvAndUpgradeClusterOperator(BundleVersionModificationData u .mapToObj(topicNameTemplate::formatted) .map(this::getKafkaYamlWithName) .parallel() - .forEach(cmdKubeClient()::applyContent); + .forEach(cmdKubeClient(testStorage.getNamespaceName())::applyContent); } if (upgradeData.getContinuousClientsMessages() != 0) { @@ -370,17 +398,17 @@ protected void setupEnvAndUpgradeClusterOperator(BundleVersionModificationData u // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update // ############################## // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update - if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL)).contains(testStorage.getTopicName())) { + if (!cmdKubeClient(testStorage.getNamespaceName()).getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL)).contains(testStorage.getTopicName())) { String pathToTopicExamples = upgradeData.getFromExamples().equals("HEAD") ? 
PATH_TO_KAFKA_TOPIC_CONFIG : upgradeData.getFromExamples() + "/examples/topic/kafka-topic.yaml"; kafkaTopicYaml = new File(dir, pathToTopicExamples); - cmdKubeClient().applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString) + cmdKubeClient(testStorage.getNamespaceName()).applyContent(TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString) .replace("name: \"my-topic\"", "name: \"" + testStorage.getTopicName() + "\"") .replace("partitions: 1", "partitions: 3") .replace("replicas: 1", "replicas: 3") + " min.insync.replicas: 2"); - ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL), testStorage.getTopicName()); + ResourceManager.waitForResourceReadiness(testStorage.getNamespaceName(), getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL), testStorage.getTopicName()); } // 40s is used within TF environment to make upgrade/downgrade more stable on slow env @@ -390,7 +418,7 @@ protected void setupEnvAndUpgradeClusterOperator(BundleVersionModificationData u .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)) .withMessageCount(upgradeData.getContinuousClientsMessages()) .withAdditionalConfig(producerAdditionConfiguration) - .withNamespaceName(namespace) + .withNamespaceName(testStorage.getNamespaceName()) .build(); resourceManager.createResourceWithWait( @@ -400,7 +428,7 @@ protected void setupEnvAndUpgradeClusterOperator(BundleVersionModificationData u // ############################## } - makeSnapshots(); + makeComponentsSnapshots(testStorage.getNamespaceName()); } private String getKafkaYamlWithName(String name) { @@ -410,13 +438,13 @@ private String getKafkaYamlWithName(String name) { return TestUtils.getContent(kafkaTopicYaml, TestUtils::toYamlString).replace(initialName, newName); } - protected void verifyProcedure(BundleVersionModificationData upgradeData, String producerName, String consumerName, String namespace, boolean wasUTOUsedBefore) { + protected void verifyProcedure(String componentsNamespaceNames, BundleVersionModificationData upgradeData, String producerName, String consumerName, boolean wasUTOUsedBefore) { if (upgradeData.getAdditionalTopics() != null) { - boolean isUTOUsed = StUtils.isUnidirectionalTopicOperatorUsed(namespace, eoSelector); + boolean isUTOUsed = StUtils.isUnidirectionalTopicOperatorUsed(componentsNamespaceNames, eoSelector); // Check that topics weren't deleted/duplicated during upgrade procedures - String listedTopics = cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL)); + String listedTopics = cmdKubeClient(componentsNamespaceNames).getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL)); int additionalTopics = upgradeData.getAdditionalTopics(); assertThat("KafkaTopic list doesn't have expected size", Long.valueOf(listedTopics.lines().count() - 1).intValue(), greaterThanOrEqualTo(getExpectedTopicCount(isUTOUsed, wasUTOUsedBefore) + additionalTopics)); assertThat("KafkaTopic " + topicName + " is not in expected Topic list", @@ -430,7 +458,7 @@ protected void verifyProcedure(BundleVersionModificationData upgradeData, String // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(producerName, consumerName, namespace, upgradeData.getContinuousClientsMessages()); + ClientUtils.waitForClientsSuccess(producerName, consumerName, componentsNamespaceNames, upgradeData.getContinuousClientsMessages()); // ############################## } } @@ -439,9 
+467,8 @@ protected String getResourceApiVersion(String resourcePlural) { return resourcePlural + "." + Constants.V1BETA2 + "." + Constants.RESOURCE_GROUP_NAME; } - protected void deployCoWithWaitForReadiness(final BundleVersionModificationData upgradeData, - final String namespaceName) throws IOException { - LOGGER.info("Deploying CO: {} in Namespace: {}", ResourceManager.getCoDeploymentName(), namespaceName); + protected void deployCoWithWaitForReadiness(final String clusterOperatorNamespaceName, final String componentsNamespaceName, final BundleVersionModificationData upgradeData) throws IOException { + LOGGER.info("Deploying CO: {} in Namespace: {}", ResourceManager.getCoDeploymentName(), clusterOperatorNamespaceName); if (upgradeData.getFromVersion().equals("HEAD")) { coDir = new File(TestUtils.USER_PATH + "/../packaging/install/cluster-operator"); @@ -452,23 +479,24 @@ protected void deployCoWithWaitForReadiness(final BundleVersionModificationData } // Modify + apply installation files - copyModifyApply(coDir, namespaceName, upgradeData.getFeatureGatesBefore()); + modifyApplyClusterOperatorWithCRDsFromFile(clusterOperatorNamespaceName, componentsNamespaceName, coDir, upgradeData.getFeatureGatesBefore()); - LOGGER.info("Waiting for Deployment: {}", ResourceManager.getCoDeploymentName()); - DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, ResourceManager.getCoDeploymentName(), 1); + LOGGER.info("Waiting for Cluster Operator Deployment: {}", ResourceManager.getCoDeploymentName()); + DeploymentUtils.waitForDeploymentAndPodsReady(clusterOperatorNamespaceName, ResourceManager.getCoDeploymentName(), 1); LOGGER.info("{} is ready", ResourceManager.getCoDeploymentName()); } - protected void deployKafkaClusterWithWaitForReadiness(final BundleVersionModificationData upgradeData, + protected void deployKafkaClusterWithWaitForReadiness(final String componentsNamespaceName, + final BundleVersionModificationData upgradeData, final UpgradeKafkaVersion upgradeKafkaVersion) { - LOGGER.info("Deploying Kafka: {} in Namespace: {}", clusterName, kubeClient().getNamespace()); + LOGGER.info("Deploying Kafka: {}/{}", componentsNamespaceName, clusterName); - if (!cmdKubeClient().getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL)).contains(clusterName)) { + if (!cmdKubeClient(componentsNamespaceName).getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL)).contains(clusterName)) { // Deploy a Kafka cluster if (upgradeData.getFromExamples().equals("HEAD")) { - resourceManager.createResourceWithWait(KafkaNodePoolTemplates.brokerPoolPersistentStorage(CO_NAMESPACE, poolName, clusterName, 3).build()); - resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(CO_NAMESPACE, clusterName, 3, 3) + resourceManager.createResourceWithWait(KafkaNodePoolTemplates.brokerPoolPersistentStorage(componentsNamespaceName, poolName, clusterName, 3).build()); + resourceManager.createResourceWithWait(KafkaTemplates.kafkaPersistent(componentsNamespaceName, clusterName, 3, 3) .editSpec() .editKafka() .withVersion(upgradeKafkaVersion.getVersion()) @@ -482,52 +510,49 @@ protected void deployKafkaClusterWithWaitForReadiness(final BundleVersionModific LOGGER.info("Deploying Kafka from: {}", kafkaYaml.getPath()); // Change kafka version of it's empty (null is for remove the version) if (upgradeKafkaVersion == null) { - cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaVersion(kafkaYaml, null)); + cmdKubeClient(componentsNamespaceName).applyContent(KafkaUtils.changeOrRemoveKafkaVersion(kafkaYaml, 
null)); } else { - cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaConfiguration(kafkaYaml, upgradeKafkaVersion.getVersion(), upgradeKafkaVersion.getLogMessageVersion(), upgradeKafkaVersion.getInterBrokerVersion())); + cmdKubeClient(componentsNamespaceName).applyContent(KafkaUtils.changeOrRemoveKafkaConfiguration(kafkaYaml, upgradeKafkaVersion.getVersion(), upgradeKafkaVersion.getLogMessageVersion(), upgradeKafkaVersion.getInterBrokerVersion())); } // Wait for readiness - waitForReadinessOfKafkaCluster(); + waitForReadinessOfKafkaCluster(componentsNamespaceName); } } } - protected void deployKafkaUserWithWaitForReadiness(final BundleVersionModificationData upgradeData, - final String namespaceName) { - LOGGER.info("Deploying KafkaUser: {}/{}", kubeClient().getNamespace(), userName); + protected void deployKafkaUserWithWaitForReadiness(final String componentsNamespaceName, final BundleVersionModificationData upgradeData) { + LOGGER.info("Deploying KafkaUser: {}/{}", componentsNamespaceName, userName); - if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL)).contains(userName)) { + if (!cmdKubeClient(componentsNamespaceName).getResources(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL)).contains(userName)) { if (upgradeData.getFromVersion().equals("HEAD")) { - resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(namespaceName, userName, clusterName).build()); + resourceManager.createResourceWithWait(KafkaUserTemplates.tlsUser(componentsNamespaceName, userName, clusterName).build()); } else { kafkaUserYaml = new File(dir, upgradeData.getFromExamples() + "/examples/user/kafka-user.yaml"); LOGGER.info("Deploying KafkaUser from: {}", kafkaUserYaml.getPath()); - cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization")); - ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaUser.RESOURCE_PLURAL), userName); + cmdKubeClient(componentsNamespaceName).applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization")); + ResourceManager.waitForResourceReadiness(componentsNamespaceName, getResourceApiVersion(KafkaUser.RESOURCE_PLURAL), userName); } } } - protected void deployKafkaTopicWithWaitForReadiness(final BundleVersionModificationData upgradeData) { - LOGGER.info("Deploying KafkaTopic: {}/{}", kubeClient().getNamespace(), topicName); + protected void deployKafkaTopicWithWaitForReadiness(final String componentsNamespaceName, final BundleVersionModificationData upgradeData) { + LOGGER.info("Deploying KafkaTopic: {}/{}", componentsNamespaceName, topicName); - if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL)).contains(topicName)) { + if (!cmdKubeClient(componentsNamespaceName).getResources(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL)).contains(topicName)) { if (upgradeData.getFromVersion().equals("HEAD")) { kafkaTopicYaml = new File(dir, PATH_TO_PACKAGING_EXAMPLES + "/topic/kafka-topic.yaml"); } else { kafkaTopicYaml = new File(dir, upgradeData.getFromExamples() + "/examples/topic/kafka-topic.yaml"); } LOGGER.info("Deploying KafkaTopic from: {}", kafkaTopicYaml.getPath()); - cmdKubeClient().create(kafkaTopicYaml); - ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL), topicName); + cmdKubeClient(componentsNamespaceName).create(kafkaTopicYaml); + ResourceManager.waitForResourceReadiness(componentsNamespaceName, getResourceApiVersion(KafkaTopic.RESOURCE_PLURAL), topicName); } } - protected void 
deployKafkaConnectAndKafkaConnectorWithWaitForReadiness(final BundleVersionModificationData acrossUpgradeData, - final UpgradeKafkaVersion upgradeKafkaVersion, - final TestStorage testStorage) { + protected void deployKafkaConnectAndKafkaConnectorWithWaitForReadiness(final TestStorage testStorage, final BundleVersionModificationData acrossUpgradeData, final UpgradeKafkaVersion upgradeKafkaVersion) { // setup KafkaConnect + KafkaConnector - if (!cmdKubeClient().getResources(getResourceApiVersion(KafkaConnect.RESOURCE_PLURAL)).contains(clusterName)) { + if (!cmdKubeClient(testStorage.getNamespaceName()).getResources(getResourceApiVersion(KafkaConnect.RESOURCE_PLURAL)).contains(clusterName)) { if (acrossUpgradeData.getFromVersion().equals("HEAD")) { resourceManager.createResourceWithWait(KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), clusterName, 1) .editMetadata() @@ -582,8 +607,8 @@ protected void deployKafkaConnectAndKafkaConnectorWithWaitForReadiness(final Bun LOGGER.info("Deploying KafkaConnect from: {}", kafkaConnectYaml.getPath()); - cmdKubeClient().applyContent(TestUtils.toYamlString(kafkaConnect)); - ResourceManager.waitForResourceReadiness(getResourceApiVersion(KafkaConnect.RESOURCE_PLURAL), kafkaConnect.getMetadata().getName()); + cmdKubeClient(testStorage.getNamespaceName()).applyContent(TestUtils.toYamlString(kafkaConnect)); + ResourceManager.waitForResourceReadiness(testStorage.getNamespaceName(), getResourceApiVersion(KafkaConnect.RESOURCE_PLURAL), kafkaConnect.getMetadata().getName()); // in our examples is no sink connector and thus we are using the same as in HEAD verification resourceManager.createResourceWithWait(KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), clusterName) @@ -600,15 +625,17 @@ protected void deployKafkaConnectAndKafkaConnectorWithWaitForReadiness(final Bun } } - protected void doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(final BundleVersionModificationData bundleDowngradeDataWithFeatureGates, + protected void doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(final String clusterOperatorNamespaceName, final TestStorage testStorage, + final BundleVersionModificationData bundleDowngradeDataWithFeatureGates, final UpgradeKafkaVersion upgradeKafkaVersion) throws IOException { - this.deployCoWithWaitForReadiness(bundleDowngradeDataWithFeatureGates, testStorage.getNamespaceName()); - this.deployKafkaClusterWithWaitForReadiness(bundleDowngradeDataWithFeatureGates, upgradeKafkaVersion); - this.deployKafkaConnectAndKafkaConnectorWithWaitForReadiness(bundleDowngradeDataWithFeatureGates, upgradeKafkaVersion, testStorage); - this.deployKafkaUserWithWaitForReadiness(bundleDowngradeDataWithFeatureGates, testStorage.getNamespaceName()); + this.deployCoWithWaitForReadiness(clusterOperatorNamespaceName, testStorage.getNamespaceName(), bundleDowngradeDataWithFeatureGates); + this.deployKafkaClusterWithWaitForReadiness(testStorage.getNamespaceName(), bundleDowngradeDataWithFeatureGates, upgradeKafkaVersion); + this.deployKafkaConnectAndKafkaConnectorWithWaitForReadiness(testStorage, bundleDowngradeDataWithFeatureGates, upgradeKafkaVersion); + this.deployKafkaUserWithWaitForReadiness(testStorage.getNamespaceName(), bundleDowngradeDataWithFeatureGates); final KafkaClients clients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(clusterName)) + .withNamespaceName(testStorage.getNamespaceName()) .withUsername(userName) .build(); @@ -616,27 +643,25 @@ protected 
void doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(final // Verify that Producer finish successfully ClientUtils.waitForInstantProducerClientSuccess(testStorage); - makeSnapshots(); - logPodImagesWithConnect(TestConstants.CO_NAMESPACE); + makeComponentsSnapshots(testStorage.getNamespaceName()); + logComponentsPodImagesWithConnect(testStorage.getNamespaceName()); // Verify FileSink KafkaConnector before upgrade String connectorPodName = kubeClient().listPods(testStorage.getNamespaceName(), Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND)).get(0).getMetadata().getName(); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), connectorPodName, DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); // Upgrade CO to HEAD and wait for readiness of ClusterOperator - changeClusterOperator(bundleDowngradeDataWithFeatureGates, testStorage.getNamespaceName()); + changeClusterOperator(clusterOperatorNamespaceName, testStorage.getNamespaceName(), bundleDowngradeDataWithFeatureGates); - // Verify that Kafka cluster RU - waitForKafkaClusterRollingUpdate(); - - // Verify that KafkaConnect pods are rolling and KafkaConnector is ready + // Verify that Kafka and Connect Pods Rolled + waitForKafkaClusterRollingUpdate(testStorage.getNamespaceName()); RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), connectLabelSelector, 1, connectPods); KafkaConnectorUtils.waitForConnectorReady(testStorage.getNamespaceName(), clusterName); // send again new messages resourceManager.createResourceWithWait(clients.producerTlsStrimzi(clusterName)); // Verify that Producer finish successfully - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + ClientUtils.waitForInstantProducerClientSuccess(testStorage.getNamespaceName(), testStorage); // Verify FileSink KafkaConnector connectorPodName = kubeClient().listPods(testStorage.getNamespaceName(), Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND)).get(0).getMetadata().getName(); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), connectorPodName, DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); @@ -654,17 +679,17 @@ protected String downloadExamplesAndGetPath(CommonVersionModificationData versio } } - protected void cleanUpKafkaTopics() { - List topics = KafkaTopicResource.kafkaTopicClient().inNamespace(CO_NAMESPACE).list().getItems(); + protected void cleanUpKafkaTopics(String componentsNamespaceName) { + List topics = KafkaTopicResource.kafkaTopicClient().inNamespace(componentsNamespaceName).list().getItems(); boolean finalizersAreSet = topics.stream().anyMatch(kafkaTopic -> kafkaTopic.getFinalizers() != null); // in case that we are upgrading/downgrading from UTO to BTO, we have to set finalizers on topics to null before deleting them - if (!StUtils.isUnidirectionalTopicOperatorUsed(CO_NAMESPACE, eoSelector) && finalizersAreSet) { - KafkaTopicUtils.setFinalizersInAllTopicsToNull(CO_NAMESPACE); + if (!StUtils.isUnidirectionalTopicOperatorUsed(componentsNamespaceName, eoSelector) && finalizersAreSet) { + KafkaTopicUtils.setFinalizersInAllTopicsToNull(componentsNamespaceName); } // delete all topics created in test - cmdKubeClient(TestConstants.CO_NAMESPACE).deleteAllByResource(KafkaTopic.RESOURCE_KIND); - KafkaTopicUtils.waitForTopicWithPrefixDeletion(TestConstants.CO_NAMESPACE, topicName); + cmdKubeClient(componentsNamespaceName).deleteAllByResource(KafkaTopic.RESOURCE_KIND); + 
KafkaTopicUtils.waitForTopicWithPrefixDeletion(componentsNamespaceName, topicName); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/AbstractKRaftUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/AbstractKRaftUpgradeST.java index 0194bf91e14..a3d9ba485c0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/AbstractKRaftUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/AbstractKRaftUpgradeST.java @@ -9,8 +9,7 @@ import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.nodepool.ProcessRoles; import io.strimzi.operator.common.Annotations; -import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.resources.ResourceManager; +import io.strimzi.systemtest.resources.NamespaceManager; import io.strimzi.systemtest.resources.crd.KafkaNodePoolResource; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; @@ -26,13 +25,16 @@ import io.strimzi.test.TestUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import java.io.File; import java.io.IOException; import java.util.Map; +import static io.strimzi.systemtest.Environment.TEST_SUITE_NAMESPACE; +import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE; import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient; -import static io.strimzi.test.k8s.KubeClusterResource.kubeClient; import static org.junit.jupiter.api.Assertions.fail; public class AbstractKRaftUpgradeST extends AbstractUpgradeST { @@ -49,26 +51,26 @@ public class AbstractKRaftUpgradeST extends AbstractUpgradeST { protected final LabelSelector brokerSelector = KafkaNodePoolResource.getLabelSelector(clusterName, BROKER_NODE_NAME, ProcessRoles.BROKER); @Override - protected void makeSnapshots() { - coPods = DeploymentUtils.depSnapshot(TestConstants.CO_NAMESPACE, ResourceManager.getCoDeploymentName()); - eoPods = DeploymentUtils.depSnapshot(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName)); - controllerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, controllerSelector); - brokerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector); - connectPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, connectLabelSelector); + protected void makeComponentsSnapshots(String componentsNamespaceName) { + eoPods = DeploymentUtils.depSnapshot(componentsNamespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)); + controllerPods = PodUtils.podSnapshot(componentsNamespaceName, controllerSelector); + brokerPods = PodUtils.podSnapshot(componentsNamespaceName, brokerSelector); + connectPods = PodUtils.podSnapshot(componentsNamespaceName, connectLabelSelector); } @Override - protected void deployKafkaClusterWithWaitForReadiness(final BundleVersionModificationData upgradeData, + protected void deployKafkaClusterWithWaitForReadiness(final String componentsNamespaceName, + final BundleVersionModificationData upgradeData, final UpgradeKafkaVersion upgradeKafkaVersion) { - LOGGER.info("Deploying Kafka: {} in Namespace: {}", clusterName, kubeClient().getNamespace()); + LOGGER.info("Deploying Kafka: {}/{}", componentsNamespaceName, clusterName); - if (!cmdKubeClient().getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL)).contains(clusterName)) { + if 
(!cmdKubeClient(componentsNamespaceName).getResources(getResourceApiVersion(Kafka.RESOURCE_PLURAL)).contains(clusterName)) { // Deploy a Kafka cluster if (upgradeData.getFromExamples().equals("HEAD")) { resourceManager.createResourceWithWait( - KafkaNodePoolTemplates.controllerPoolPersistentStorage(TestConstants.CO_NAMESPACE, CONTROLLER_NODE_NAME, clusterName, 3).build(), - KafkaNodePoolTemplates.brokerPoolPersistentStorage(TestConstants.CO_NAMESPACE, BROKER_NODE_NAME, clusterName, 3).build(), - KafkaTemplates.kafkaPersistentKRaft(TestConstants.CO_NAMESPACE, clusterName, 3) + KafkaNodePoolTemplates.controllerPoolPersistentStorage(componentsNamespaceName, CONTROLLER_NODE_NAME, clusterName, 3).build(), + KafkaNodePoolTemplates.brokerPoolPersistentStorage(componentsNamespaceName, BROKER_NODE_NAME, clusterName, 3).build(), + KafkaTemplates.kafkaPersistentKRaft(componentsNamespaceName, clusterName, 3) .editMetadata() .addToAnnotations(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled") .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") @@ -85,39 +87,39 @@ protected void deployKafkaClusterWithWaitForReadiness(final BundleVersionModific LOGGER.info("Deploying Kafka from: {}", kafkaYaml.getPath()); // Change kafka version of it's empty (null is for remove the version) if (upgradeKafkaVersion == null) { - cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaInKRaft(kafkaYaml, null)); + cmdKubeClient(componentsNamespaceName).applyContent(KafkaUtils.changeOrRemoveKafkaInKRaft(kafkaYaml, null)); } else { - cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaConfigurationInKRaft(kafkaYaml, upgradeKafkaVersion.getVersion(), upgradeKafkaVersion.getMetadataVersion())); + cmdKubeClient(componentsNamespaceName).applyContent(KafkaUtils.changeOrRemoveKafkaConfigurationInKRaft(kafkaYaml, upgradeKafkaVersion.getVersion(), upgradeKafkaVersion.getMetadataVersion())); } // Wait for readiness - waitForReadinessOfKafkaCluster(); + waitForReadinessOfKafkaCluster(componentsNamespaceName); } } } @Override - protected void waitForKafkaClusterRollingUpdate() { + protected void waitForKafkaClusterRollingUpdate(final String componentsNamespaceName) { LOGGER.info("Waiting for Kafka Pods with controller role to be rolled"); - controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerPods); + controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(componentsNamespaceName, controllerSelector, 3, controllerPods); LOGGER.info("Waiting for Kafka Pods with broker role to be rolled"); - brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(componentsNamespaceName, brokerSelector, 3, brokerPods); LOGGER.info("Waiting for EO Deployment to be rolled"); // Check the TO and UO also got upgraded - eoPods = DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods); + eoPods = DeploymentUtils.waitTillDepHasRolled(componentsNamespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods); } @Override - protected void waitForReadinessOfKafkaCluster() { + protected void waitForReadinessOfKafkaCluster(final String componentsNamespaceName) { LOGGER.info("Waiting for Kafka Pods with controller role to be ready"); - 
RollingUpdateUtils.waitForComponentAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3); + RollingUpdateUtils.waitForComponentAndPodsReady(componentsNamespaceName, controllerSelector, 3); LOGGER.info("Waiting for Kafka Pods with broker role to be ready"); - RollingUpdateUtils.waitForComponentAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3); + RollingUpdateUtils.waitForComponentAndPodsReady(componentsNamespaceName, brokerSelector, 3); LOGGER.info("Waiting for EO Deployment"); - DeploymentUtils.waitForDeploymentAndPodsReady(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1); + DeploymentUtils.waitForDeploymentAndPodsReady(componentsNamespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1); } - protected void changeKafkaAndMetadataVersion(CommonVersionModificationData versionModificationData) throws IOException { - changeKafkaAndMetadataVersion(versionModificationData, false); + protected void changeKafkaAndMetadataVersion(final String componentsNamespaceName, CommonVersionModificationData versionModificationData) throws IOException { + changeKafkaAndMetadataVersion(componentsNamespaceName, versionModificationData, false); } /** @@ -129,12 +131,12 @@ protected void changeKafkaAndMetadataVersion(CommonVersionModificationData versi * @throws IOException exception during application of YAML files */ @SuppressWarnings("CyclomaticComplexity") - protected void changeKafkaAndMetadataVersion(CommonVersionModificationData versionModificationData, boolean replaceEvenIfMissing) throws IOException { + protected void changeKafkaAndMetadataVersion(final String componentsNamespaceName, CommonVersionModificationData versionModificationData, boolean replaceEvenIfMissing) throws IOException { // Get Kafka version - String kafkaVersionFromCR = cmdKubeClient().getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.version"); + String kafkaVersionFromCR = cmdKubeClient(componentsNamespaceName).getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.version"); kafkaVersionFromCR = kafkaVersionFromCR.equals("") ? 
null : kafkaVersionFromCR; // Get Kafka metadata version - String currentMetadataVersion = cmdKubeClient().getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.metadataVersion"); + String currentMetadataVersion = cmdKubeClient(componentsNamespaceName).getResourceJsonPath(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, ".spec.kafka.metadataVersion"); String kafkaVersionFromProcedure = versionModificationData.getProcedures().getVersion(); @@ -144,7 +146,7 @@ protected void changeKafkaAndMetadataVersion(CommonVersionModificationData versi String examplesPath = downloadExamplesAndGetPath(versionModificationData); String kafkaFilePath = examplesPath + versionModificationData.getKafkaKRaftFilePathAfter(); - applyCustomResourcesFromPath(examplesPath, kafkaFilePath, kafkaVersionFromCR, currentMetadataVersion); + applyCustomResourcesFromPath(componentsNamespaceName, examplesPath, kafkaFilePath, kafkaVersionFromCR, currentMetadataVersion); // ####################################################################### @@ -152,48 +154,50 @@ protected void changeKafkaAndMetadataVersion(CommonVersionModificationData versi if (kafkaVersionFromProcedure != null && !kafkaVersionFromProcedure.isEmpty() && !kafkaVersionFromCR.contains(kafkaVersionFromProcedure)) { LOGGER.info("Set Kafka version to " + kafkaVersionFromProcedure); - cmdKubeClient().patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/version", kafkaVersionFromProcedure); + cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/version", kafkaVersionFromProcedure); - waitForKafkaControllersAndBrokersFinishRollingUpdate(); + waitForKafkaControllersAndBrokersFinishRollingUpdate(componentsNamespaceName); } String metadataVersion = versionModificationData.getProcedures().getMetadataVersion(); if (metadataVersion != null && !metadataVersion.isEmpty()) { LOGGER.info("Set metadata version to {} (current version is {})", metadataVersion, currentMetadataVersion); - cmdKubeClient().patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/metadataVersion", metadataVersion); + cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL), clusterName, "/spec/kafka/metadataVersion", metadataVersion); - makeSnapshots(); + makeComponentsSnapshots(componentsNamespaceName); } } } @Override - protected void checkAllImages(BundleVersionModificationData versionModificationData, String namespaceName) { + protected void checkAllComponentsImages(String componentsNamespaceName, BundleVersionModificationData versionModificationData) { if (versionModificationData.getImagesAfterOperations().isEmpty()) { fail("There are no expected images"); } - checkContainerImages(controllerSelector, versionModificationData.getKafkaImage()); - checkContainerImages(brokerSelector, versionModificationData.getKafkaImage()); - checkContainerImages(eoSelector, versionModificationData.getTopicOperatorImage()); - checkContainerImages(eoSelector, 1, versionModificationData.getUserOperatorImage()); + checkContainerImages(componentsNamespaceName, controllerSelector, versionModificationData.getKafkaImage()); + checkContainerImages(componentsNamespaceName, brokerSelector, versionModificationData.getKafkaImage()); + checkContainerImages(componentsNamespaceName, eoSelector, versionModificationData.getTopicOperatorImage()); + checkContainerImages(componentsNamespaceName, eoSelector, 1, 
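Review note: the ordering contract inside changeKafkaAndMetadataVersion is worth spelling out: a spec.kafka.version patch must be followed by the controller/broker roll wait, whereas a metadataVersion patch is expected to cause no roll, so only the Pod snapshots are refreshed. A condensed sketch of the two branches (the version strings are illustrative values, not from this PR):

    // Kafka version change -> controllers and brokers are expected to roll.
    cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL),
        clusterName, "/spec/kafka/version", "3.7.0");           // illustrative value
    waitForKafkaControllersAndBrokersFinishRollingUpdate(componentsNamespaceName);

    // metadataVersion change -> no roll; just re-snapshot the component Pods.
    cmdKubeClient(componentsNamespaceName).patchResource(getResourceApiVersion(Kafka.RESOURCE_PLURAL),
        clusterName, "/spec/kafka/metadataVersion", "3.7-IV4"); // illustrative value
    makeComponentsSnapshots(componentsNamespaceName);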
versionModificationData.getUserOperatorImage()); } - @Override - protected void logPodImages(String namespaceName) { - logPodImages(namespaceName, controllerSelector, brokerSelector, eoSelector, coSelector); + protected void logComponentsPodImagesWithConnect(String componentsNamespaceName) { + logPodImages(componentsNamespaceName, controllerSelector, brokerSelector, eoSelector, connectLabelSelector); } - @Override - protected void logPodImagesWithConnect(String namespaceName) { - logPodImages(namespaceName, controllerSelector, brokerSelector, eoSelector, coSelector); + protected void logComponentsPodImages(String componentsNamespaceName) { + logPodImages(componentsNamespaceName, controllerSelector, brokerSelector, eoSelector); + } + + protected void logClusterOperatorPodImage(String clusterOperatorNamespaceName) { + logPodImages(clusterOperatorNamespaceName, coSelector); } - protected void waitForKafkaControllersAndBrokersFinishRollingUpdate() { + protected void waitForKafkaControllersAndBrokersFinishRollingUpdate(String componentsNamespaceName) { LOGGER.info("Waiting for Kafka rolling update to finish"); - controllerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerPods); - brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods); + controllerPods = RollingUpdateUtils.waitTillComponentHasRolled(componentsNamespaceName, controllerSelector, 3, controllerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(componentsNamespaceName, brokerSelector, 3, brokerPods); } protected void applyKafkaCustomResourceFromPath(String kafkaFilePath, String kafkaVersionFromCR, String kafkaMetadataVersion) { @@ -205,15 +209,29 @@ protected void applyKafkaCustomResourceFromPath(String kafkaFilePath, String kaf cmdKubeClient().applyContent(KafkaUtils.changeOrRemoveKafkaConfigurationInKRaft(kafkaYaml, kafkaVersionFromCR, metadataVersion)); } - protected void applyCustomResourcesFromPath(String examplesPath, String kafkaFilePath, String kafkaVersionFromCR, String kafkaMetadataVersion) { + protected void applyCustomResourcesFromPath(String namespaceName, String examplesPath, String kafkaFilePath, String kafkaVersionFromCR, String kafkaMetadataVersion) { applyKafkaCustomResourceFromPath(kafkaFilePath, kafkaVersionFromCR, kafkaMetadataVersion); kafkaUserYaml = new File(examplesPath + "/examples/user/kafka-user.yaml"); - LOGGER.info("Deploying KafkaUser from: {}", kafkaUserYaml.getPath()); - cmdKubeClient().applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization")); + LOGGER.info("Deploying KafkaUser from: {}, in Namespace: {}", kafkaUserYaml.getPath(), namespaceName); + cmdKubeClient(namespaceName).applyContent(KafkaUserUtils.removeKafkaUserPart(kafkaUserYaml, "authorization")); kafkaTopicYaml = new File(examplesPath + "/examples/topic/kafka-topic.yaml"); - LOGGER.info("Deploying KafkaTopic from: {}", kafkaTopicYaml.getPath()); - cmdKubeClient().applyContent(TestUtils.readFile(kafkaTopicYaml)); + LOGGER.info("Deploying KafkaTopic from: {}, in Namespace {}", kafkaTopicYaml.getPath(), namespaceName); + cmdKubeClient(namespaceName).applyContent(TestUtils.readFile(kafkaTopicYaml)); + } + + @BeforeEach + void setupEnvironment() { + NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE); + NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE); + } + + @AfterEach + void afterEach() { + 
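Review note: splitting the old logPodImages overloads into three explicitly scoped helpers reads well, and it looks like it also fixes a latent bug: the removed logPodImagesWithConnect logged coSelector instead of the Connect Pods, while the new variant uses connectLabelSelector. Intended call sites, per the new scoping (namespace variable names are illustrative):

    logClusterOperatorPodImage(clusterOperatorNs);   // CO Deployment only (coSelector)
    logComponentsPodImages(componentsNs);            // controllers, brokers, Entity Operator
    logComponentsPodImagesWithConnect(componentsNs); // the same set plus Kafka Connect

Two nits in the surrounding hunks: applyKafkaCustomResourceFromPath still applies through the un-namespaced cmdKubeClient() while the KafkaUser/KafkaTopic applies now pass namespaceName, and the KafkaTopic log message is missing the colon after "Namespace".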
cleanUpKafkaTopics(TEST_SUITE_NAMESPACE); + deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir); + NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE); + NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftKafkaUpgradeDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftKafkaUpgradeDowngradeST.java index 9b65093f845..fe943a267ec 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftKafkaUpgradeDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftKafkaUpgradeDowngradeST.java @@ -8,7 +8,6 @@ import io.strimzi.api.kafka.model.kafka.KafkaBuilder; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.operator.common.Annotations; -import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.ResourceManager; @@ -43,11 +42,11 @@ @Tag(KRAFT_UPGRADE) public class KRaftKafkaUpgradeDowngradeST extends AbstractKRaftUpgradeST { private static final Logger LOGGER = LogManager.getLogger(KRaftKafkaUpgradeDowngradeST.class); - private final int continuousClientsMessageCount = 500; + private final int continuousClientsMessageCount = 300; @IsolatedTest void testKafkaClusterUpgrade() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = 0; x < sortedVersions.size() - 1; x++) { @@ -57,19 +56,19 @@ void testKafkaClusterUpgrade() { // If it is an upgrade test we keep the metadata version as the lower version number String metadataVersion = initialVersion.metadataVersion(); - runVersionChange(initialVersion, newVersion, testStorage, metadataVersion, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, metadataVersion, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @IsolatedTest void testKafkaClusterDowngrade() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = sortedVersions.size() - 1; x > 0; x--) { @@ -78,32 +77,32 @@ void testKafkaClusterDowngrade() { // If it is a downgrade then we make sure that we are using the lowest metadataVersion from the whole list String metadataVersion = sortedVersions.get(0).metadataVersion(); - runVersionChange(initialVersion, newVersion, testStorage, metadataVersion, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, metadataVersion, 3, 3); } // ############################## // Validate that continuous clients finished successfully // 
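Review note: with TestStorage now owning the namespace, each upgrade/downgrade test walks adjacent supported versions pairwise while the continuous clients keep running, and only asserts on the clients once the whole chain is done. The loop contract, condensed (generic type parameters restored for readability, since the diff rendering strips them):

    List<TestKafkaVersion> sortedVersions = TestKafkaVersion.getSupportedKafkaVersions();
    for (int x = 0; x < sortedVersions.size() - 1; x++) {
        runVersionChange(testStorage, sortedVersions.get(x), sortedVersions.get(x + 1),
            sortedVersions.get(x).metadataVersion(), 3, 3);
    }
    // Clients produced/consumed throughout all rolls; verify them once at the end.
    ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(),
        testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(),
        continuousClientsMessageCount);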
############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @IsolatedTest void testUpgradeWithNoMetadataVersionSet() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = 0; x < sortedVersions.size() - 1; x++) { TestKafkaVersion initialVersion = sortedVersions.get(x); TestKafkaVersion newVersion = sortedVersions.get(x + 1); - runVersionChange(initialVersion, newVersion, testStorage, null, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, null, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @@ -116,14 +115,14 @@ void setupEnvironment() { } @SuppressWarnings({"checkstyle:MethodLength"}) - void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersion, TestStorage testStorage, String initMetadataVersion, int controllerReplicas, int brokerReplicas) { + void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion, TestKafkaVersion newVersion, String initMetadataVersion, int controllerReplicas, int brokerReplicas) { boolean isUpgrade = initialVersion.isUpgrade(newVersion); Map controllerPods; Map brokerPods; boolean sameMinorVersion = initialVersion.metadataVersion().equals(newVersion.metadataVersion()); - if (KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get() == null) { + if (KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get() == null) { LOGGER.info("Deploying initial Kafka version {} with metadataVersion={}", initialVersion.version(), initMetadataVersion); KafkaBuilder kafka = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), clusterName, controllerReplicas, brokerReplicas) @@ -149,8 +148,8 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi } resourceManager.createResourceWithWait( - KafkaNodePoolTemplates.controllerPoolPersistentStorage(TestConstants.CO_NAMESPACE, CONTROLLER_NODE_NAME, clusterName, controllerReplicas).build(), - KafkaNodePoolTemplates.brokerPoolPersistentStorage(TestConstants.CO_NAMESPACE, BROKER_NODE_NAME, clusterName, brokerReplicas).build(), + KafkaNodePoolTemplates.controllerPoolPersistentStorage(testStorage.getNamespaceName(), CONTROLLER_NODE_NAME, clusterName, controllerReplicas).build(), + KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), BROKER_NODE_NAME, clusterName, brokerReplicas).build(), kafka.build() ); @@ -158,11 +157,12 @@ void runVersionChange(TestKafkaVersion initialVersion, 
TestKafkaVersion newVersi // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update // ############################## // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(TestConstants.CO_NAMESPACE, testStorage.getContinuousTopicName(), clusterName, 3, 3, 2).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), clusterName, 3, 3, 2).build()); // 40s is used within TF environment to make upgrade/downgrade more stable on slow env String producerAdditionConfiguration = "delivery.timeout.ms=300000\nrequest.timeout.ms=20000"; KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) + .withNamespaceName(testStorage.getNamespaceName()) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)) .withMessageCount(continuousClientsMessageCount) .withAdditionalConfig(producerAdditionConfiguration) @@ -175,47 +175,47 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi LOGGER.info("Deployment of initial Kafka version (" + initialVersion.version() + ") complete"); - String controllerVersionResult = KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get().getStatus().getKafkaVersion(); + String controllerVersionResult = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getStatus().getKafkaVersion(); LOGGER.info("Pre-change Kafka version: " + controllerVersionResult); - controllerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, controllerSelector); - brokerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector); + controllerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector); + brokerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector); LOGGER.info("Updating Kafka CR version field to " + newVersion.version()); // Change the version in Kafka CR KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> { kafka.getSpec().getKafka().setVersion(newVersion.version()); - }, TestConstants.CO_NAMESPACE); + }, testStorage.getNamespaceName()); LOGGER.info("Waiting for readiness of new Kafka version (" + newVersion.version() + ") to complete"); // Wait for the controllers' version change roll - controllerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, controllerSelector, controllerReplicas, controllerPods); + controllerPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), controllerSelector, controllerReplicas, controllerPods); LOGGER.info("1st Controllers roll (image change) is complete"); // Wait for the brokers' version change roll - brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, brokerReplicas, brokerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerReplicas, brokerPods); LOGGER.info("1st Brokers roll (image change) is complete"); - String currentMetadataVersion = KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getMetadataVersion(); + String currentMetadataVersion = 
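Review note: the continuous-client settings deserve a comment, since they are what make these tests meaningful during rolls: the long delivery.timeout.ms keeps records retrying while individual brokers restart, and the short request.timeout.ms fails requests to a restarting broker quickly so the retry can go elsewhere. (The "40s" comment above looks stale; the configured request timeout is 20s.) Builder shape as used here:

    KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage)
        .withNamespaceName(testStorage.getNamespaceName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withMessageCount(continuousClientsMessageCount)
        // Survive rolling updates: retry for up to 5 minutes, time out single requests in 20s.
        .withAdditionalConfig("delivery.timeout.ms=300000\nrequest.timeout.ms=20000")
        .build();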
KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getSpec().getKafka().getMetadataVersion(); LOGGER.info("Deployment of Kafka (" + newVersion.version() + ") complete"); - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName); - String controllerPodName = kubeClient().listPodsByPrefixInName(TestConstants.CO_NAMESPACE, KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName(); - String brokerPodName = kubeClient().listPodsByPrefixInName(TestConstants.CO_NAMESPACE, KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName(); + String controllerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName(); + String brokerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName(); // Extract the Kafka version number from the jars in the lib directory - controllerVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(controllerPodName); + controllerVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), controllerPodName); LOGGER.info("Post-change Kafka version query returned: " + controllerVersionResult); assertThat("Kafka container had version " + controllerVersionResult + " where " + newVersion.version() + " was expected", controllerVersionResult, is(newVersion.version())); // Extract the Kafka version number from the jars in the lib directory - String brokerVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(brokerPodName); + String brokerVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), brokerPodName); LOGGER.info("Post-change Kafka version query returned: " + brokerVersionResult); assertThat("Kafka container had version " + brokerVersionResult + " where " + newVersion.version() + @@ -230,27 +230,27 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi kafka.getSpec().getKafka().setMetadataVersion(newVersion.metadataVersion()); LOGGER.info("Kafka config after updating '{}'", kafka.getSpec().getKafka().toString()); - }, TestConstants.CO_NAMESPACE); + }, testStorage.getNamespaceName()); LOGGER.info("Metadata version changed, it doesn't require rolling update, so the Pods should be stable"); - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); - assertFalse(RollingUpdateUtils.componentHasRolled(TestConstants.CO_NAMESPACE, controllerSelector, controllerPods)); - assertFalse(RollingUpdateUtils.componentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, brokerPods)); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName); + assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), controllerSelector, controllerPods)); + assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerPods)); } if (!isUpgrade) { LOGGER.info("Verifying that metadataVersion attribute updated correctly to version {}", initMetadataVersion); - assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName) + 
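Review note: the assertions around the metadataVersion bump are the behavioural core of this test: only the image change may roll the Pods, a metadata-only change must leave them alone. The pattern, condensed from the hunk above:

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName,
        kafka -> kafka.getSpec().getKafka().setMetadataVersion(newVersion.metadataVersion()),
        testStorage.getNamespaceName());

    // No rolling update expected: compare live Pods against the pre-change snapshots.
    PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
    assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), controllerSelector, controllerPods));
    assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerPods));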
assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName) .get().getStatus().getKafkaMetadataVersion().contains(initMetadataVersion), is(true)); } else { if (currentMetadataVersion != null) { LOGGER.info("Verifying that metadataVersion attribute updated correctly to version {}", newVersion.metadataVersion()); - assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName) + assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName) .get().getStatus().getKafkaMetadataVersion().contains(newVersion.metadataVersion()), is(true)); } } - LOGGER.info("Waiting till Kafka Cluster {}/{} with specified version {} has the same version in status and specification", TestConstants.CO_NAMESPACE, clusterName, newVersion.version()); - KafkaUtils.waitUntilStatusKafkaVersionMatchesExpectedVersion(clusterName, TestConstants.CO_NAMESPACE, newVersion.version()); + LOGGER.info("Waiting till Kafka Cluster {}/{} with specified version {} has the same version in status and specification", testStorage.getNamespaceName(), clusterName, newVersion.version()); + KafkaUtils.waitUntilStatusKafkaVersionMatchesExpectedVersion(testStorage.getNamespaceName(), clusterName, newVersion.version()); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziDowngradeST.java index b91d5658f30..ac5b0349913 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziDowngradeST.java @@ -4,7 +4,6 @@ */ package io.strimzi.systemtest.upgrade.kraft; -import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.KindIPv6NotSupported; import io.strimzi.systemtest.annotations.MicroShiftNotSupported; import io.strimzi.systemtest.resources.NamespaceManager; @@ -26,6 +25,7 @@ import java.io.IOException; +import static io.strimzi.systemtest.Environment.TEST_SUITE_NAMESPACE; import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE; import static io.strimzi.systemtest.TestConstants.KRAFT_UPGRADE; import static org.junit.jupiter.api.Assumptions.assumeTrue; @@ -43,25 +43,25 @@ public class KRaftStrimziDowngradeST extends AbstractKRaftUpgradeST { @ParameterizedTest(name = "testDowngradeStrimziVersion-{0}-{1}") @MethodSource("io.strimzi.systemtest.upgrade.VersionModificationDataLoader#loadYamlDowngradeDataForKRaft") void testDowngradeStrimziVersion(String from, String to, BundleVersionModificationData parameters) throws Exception { + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); assumeTrue(StUtils.isAllowOnCurrentEnvironment(parameters.getEnvFlakyVariable())); assumeTrue(StUtils.isAllowedOnCurrentK8sVersion(parameters.getEnvMaxK8sVersion())); LOGGER.debug("Running downgrade test from version {} to {}", from, to); - performDowngrade(parameters); + performDowngrade(CO_NAMESPACE, testStorage.getNamespaceName(), parameters); } @MicroShiftNotSupported("Due to lack of Kafka Connect build feature") @KindIPv6NotSupported("Our current CI setup doesn't allow pushing into internal registries that is needed in this test") @Test void testDowngradeOfKafkaConnectAndKafkaConnector() throws IOException { - final TestStorage testStorage = new 
TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(bundleDowngradeVersionData.getDeployKafkaVersion()); - doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(bundleDowngradeVersionData, testStorage, upgradeKafkaVersion); + doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, testStorage, bundleDowngradeVersionData, upgradeKafkaVersion); } - @SuppressWarnings("MethodLength") - private void performDowngrade(BundleVersionModificationData downgradeData) throws IOException { + private void performDowngrade(String clusterOperatorNamespaceName, String componentsNamespaceName, BundleVersionModificationData downgradeData) throws IOException { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); String lowerMetadataVersion = downgradeData.getProcedures().getMetadataVersion(); @@ -71,41 +71,41 @@ private void performDowngrade(BundleVersionModificationData downgradeData) throw // We support downgrade only when you didn't upgrade to new inter.broker.protocol.version and log.message.format.version // https://strimzi.io/docs/operators/latest/full/deploying.html#con-target-downgrade-version-str - setupEnvAndUpgradeClusterOperator(downgradeData, testStorage, testUpgradeKafkaVersion, TestConstants.CO_NAMESPACE); + setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, testStorage, downgradeData, testUpgradeKafkaVersion); + logClusterOperatorPodImage(clusterOperatorNamespaceName); - logPodImages(TestConstants.CO_NAMESPACE); - - boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector); + boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(componentsNamespaceName, eoSelector); // Downgrade CO - changeClusterOperator(downgradeData, TestConstants.CO_NAMESPACE); + changeClusterOperator(clusterOperatorNamespaceName, componentsNamespaceName, downgradeData); // Wait for Kafka cluster rolling update - waitForKafkaClusterRollingUpdate(); - - logPodImages(TestConstants.CO_NAMESPACE); + waitForKafkaClusterRollingUpdate(componentsNamespaceName); + logComponentsPodImages(componentsNamespaceName); // Downgrade kafka - changeKafkaAndMetadataVersion(downgradeData); + changeKafkaAndMetadataVersion(componentsNamespaceName, downgradeData); // Verify that pods are stable - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); + PodUtils.verifyThatRunningPodsAreStable(componentsNamespaceName, clusterName); - checkAllImages(downgradeData, TestConstants.CO_NAMESPACE); + checkAllComponentsImages(componentsNamespaceName, downgradeData); // Verify upgrade - verifyProcedure(downgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore); + verifyProcedure(componentsNamespaceName, downgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore); } @BeforeEach void setupEnvironment() { NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE); + NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE); } @AfterEach void afterEach() { - cleanUpKafkaTopics(); - deleteInstalledYamls(coDir, TestConstants.CO_NAMESPACE); + cleanUpKafkaTopics(TEST_SUITE_NAMESPACE); + deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir); 
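Review note: the clusterOperatorNamespaceName/componentsNamespaceName split is what finally lets these suites run the Cluster Operator apart from the Kafka components. A condensed view of the reworked downgrade flow (local wiring such as testStorage and testUpgradeKafkaVersion elided):

    setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, testStorage, downgradeData, testUpgradeKafkaVersion);
    changeClusterOperator(clusterOperatorNamespaceName, componentsNamespaceName, downgradeData); // CO in one namespace...
    waitForKafkaClusterRollingUpdate(componentsNamespaceName);                                   // ...components roll in the other
    changeKafkaAndMetadataVersion(componentsNamespaceName, downgradeData);
    checkAllComponentsImages(componentsNamespaceName, downgradeData);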
NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE); + NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziUpgradeST.java index ac859ac5698..5cb251e3043 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/kraft/KRaftStrimziUpgradeST.java @@ -5,7 +5,6 @@ package io.strimzi.systemtest.upgrade.kraft; import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.KindIPv6NotSupported; import io.strimzi.systemtest.annotations.MicroShiftNotSupported; @@ -24,6 +23,7 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.extension.ExtensionContext; @@ -33,6 +33,7 @@ import java.io.IOException; import java.util.Map; +import static io.strimzi.systemtest.Environment.TEST_SUITE_NAMESPACE; import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE; import static io.strimzi.systemtest.TestConstants.KRAFT_UPGRADE; import static io.strimzi.test.k8s.KubeClusterResource.kubeClient; @@ -53,11 +54,12 @@ public class KRaftStrimziUpgradeST extends AbstractKRaftUpgradeST { @ParameterizedTest(name = "from: {0} (using FG <{2}>) to: {1} (using FG <{3}>) ") @MethodSource("io.strimzi.systemtest.upgrade.VersionModificationDataLoader#loadYamlUpgradeDataForKRaft") - void testUpgradeStrimziVersion(String fromVersion, String toVersion, String fgBefore, String fgAfter, BundleVersionModificationData upgradeData, ExtensionContext extensionContext) throws Exception { + void testUpgradeStrimziVersion(String fromVersion, String toVersion, String fgBefore, String fgAfter, BundleVersionModificationData upgradeData) throws Exception { + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); assumeTrue(StUtils.isAllowOnCurrentEnvironment(upgradeData.getEnvFlakyVariable())); assumeTrue(StUtils.isAllowedOnCurrentK8sVersion(upgradeData.getEnvMaxK8sVersion())); - performUpgrade(upgradeData, extensionContext); + performUpgrade(CO_NAMESPACE, testStorage.getNamespaceName(), upgradeData); } @IsolatedTest @@ -68,167 +70,166 @@ void testUpgradeKafkaWithoutVersion() throws IOException { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); // Setup env - setupEnvAndUpgradeClusterOperator(acrossUpgradeData, testStorage, upgradeKafkaVersion, TestConstants.CO_NAMESPACE); + setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, testStorage, acrossUpgradeData, upgradeKafkaVersion); - Map controllerSnapshot = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, controllerSelector); - Map brokerSnapshot = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector); - Map eoSnapshot = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, eoSelector); + Map controllerSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector); + Map brokerSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector); + Map eoSnapshot = 
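Review note: testUpgradeKafkaWithoutVersion leans entirely on snapshot bookkeeping: capture per-component Pod snapshots before touching the CO, then require exactly one roll of each set afterwards. The pattern, condensed (snapshot maps assumed to be Map<String, String> of Pod name to UID; the diff rendering strips the type parameters):

    Map<String, String> controllerSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector);
    Map<String, String> brokerSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector);

    changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData);

    // Each selector must roll exactly once relative to its snapshot and come back Ready.
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, 3, controllerSnapshot);
    RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, 3, brokerSnapshot);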
PodUtils.podSnapshot(testStorage.getNamespaceName(), eoSelector); // Make snapshots of all Pods - makeSnapshots(); + makeComponentsSnapshots(testStorage.getNamespaceName()); // Check if UTO is used before changing the CO -> used for check for KafkaTopics - boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector); + boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector); // Upgrade CO - changeClusterOperator(acrossUpgradeData, TestConstants.CO_NAMESPACE); - - logPodImages(TestConstants.CO_NAMESPACE); + changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData); + logClusterOperatorPodImage(CO_NAMESPACE); + logComponentsPodImages(testStorage.getNamespaceName()); - RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerSnapshot); - RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerSnapshot); - DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoSnapshot); - - logPodImages(TestConstants.CO_NAMESPACE); - checkAllImages(acrossUpgradeData, TestConstants.CO_NAMESPACE); + RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, 3, controllerSnapshot); + RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, 3, brokerSnapshot); + DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoSnapshot); + checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData); // Verify that Pods are stable - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName); // Verify upgrade - verifyProcedure(acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore); + verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore); - String controllerPodName = kubeClient().listPodsByPrefixInName(TestConstants.CO_NAMESPACE, KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName(); - String brokerPodName = kubeClient().listPodsByPrefixInName(TestConstants.CO_NAMESPACE, KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName(); + String controllerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName(); + String brokerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName(); - assertThat(KafkaUtils.getVersionFromKafkaPodLibs(controllerPodName), containsString(acrossUpgradeData.getProcedures().getVersion())); - assertThat(KafkaUtils.getVersionFromKafkaPodLibs(brokerPodName), containsString(acrossUpgradeData.getProcedures().getVersion())); + assertThat(KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), controllerPodName), 
containsString(acrossUpgradeData.getProcedures().getVersion())); + assertThat(KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), brokerPodName), containsString(acrossUpgradeData.getProcedures().getVersion())); } @IsolatedTest - void testUpgradeAcrossVersionsWithUnsupportedKafkaVersion(ExtensionContext extensionContext) throws IOException { + void testUpgradeAcrossVersionsWithUnsupportedKafkaVersion() throws IOException { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); UpgradeKafkaVersion upgradeKafkaVersion = UpgradeKafkaVersion.getKafkaWithVersionFromUrl(acrossUpgradeData.getFromKafkaVersionsUrl(), acrossUpgradeData.getStartingKafkaVersion()); // Setup env - setupEnvAndUpgradeClusterOperator(acrossUpgradeData, testStorage, upgradeKafkaVersion, TestConstants.CO_NAMESPACE); + setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, testStorage, acrossUpgradeData, upgradeKafkaVersion); // Make snapshots of all Pods - makeSnapshots(); + makeComponentsSnapshots(testStorage.getNamespaceName()); // Check if UTO is used before changing the CO -> used for check for KafkaTopics - boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector); + boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector); // Upgrade CO - changeClusterOperator(acrossUpgradeData, TestConstants.CO_NAMESPACE); + changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData); - waitForKafkaClusterRollingUpdate(); + waitForKafkaClusterRollingUpdate(testStorage.getNamespaceName()); - logPodImages(TestConstants.CO_NAMESPACE); + logPodImages(CO_NAMESPACE); // Upgrade kafka - changeKafkaAndMetadataVersion(acrossUpgradeData, true); + changeKafkaAndMetadataVersion(testStorage.getNamespaceName(), acrossUpgradeData, true); - logPodImages(TestConstants.CO_NAMESPACE); + logPodImages(CO_NAMESPACE); - checkAllImages(acrossUpgradeData, TestConstants.CO_NAMESPACE); + checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData); // Verify that Pods are stable - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName); // Verify upgrade - verifyProcedure(acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore); + verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore); } @IsolatedTest - void testUpgradeAcrossVersionsWithNoKafkaVersion(ExtensionContext extensionContext) throws IOException { + void testUpgradeAcrossVersionsWithNoKafkaVersion() throws IOException { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); // Setup env - setupEnvAndUpgradeClusterOperator(acrossUpgradeData, testStorage, null, TestConstants.CO_NAMESPACE); + setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, testStorage, acrossUpgradeData, null); // Check if UTO is used before changing the CO -> used for check for KafkaTopics - boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector); + boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector); // Upgrade CO - changeClusterOperator(acrossUpgradeData, TestConstants.CO_NAMESPACE); + 
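Review note: the bare logPodImages(CO_NAMESPACE) calls left in this file sit oddly next to the rest of the PR, which switched to the scoped helpers, and the components no longer live in CO_NAMESPACE here. If that is unintentional, the consistent form would presumably be:

    logClusterOperatorPodImage(CO_NAMESPACE);
    logComponentsPodImages(testStorage.getNamespaceName());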
changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData); // Wait till first upgrade finished - controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerPods); - brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods); - eoPods = DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods); + controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, 3, controllerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, 3, brokerPods); + eoPods = DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods); LOGGER.info("Rolling to new images has finished!"); - logPodImages(TestConstants.CO_NAMESPACE); + logPodImages(CO_NAMESPACE); // Upgrade kafka - changeKafkaAndMetadataVersion(acrossUpgradeData); - - logPodImages(TestConstants.CO_NAMESPACE); - - checkAllImages(acrossUpgradeData, TestConstants.CO_NAMESPACE); + changeKafkaAndMetadataVersion(testStorage.getNamespaceName(), acrossUpgradeData); + logComponentsPodImages(testStorage.getNamespaceName()); + checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData); // Verify that Pods are stable - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName); // Verify upgrade - verifyProcedure(acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore); + verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore); } @MicroShiftNotSupported("Due to lack of Kafka Connect build feature") @KindIPv6NotSupported("Our current CI setup doesn't allow pushing into internal registries that is needed in this test") @IsolatedTest void testUpgradeOfKafkaConnectAndKafkaConnector(final ExtensionContext extensionContext) throws IOException { - final TestStorage testStorage = new TestStorage(extensionContext, TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(extensionContext); final UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(acrossUpgradeData.getDefaultKafka()); - doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(acrossUpgradeData, testStorage, upgradeKafkaVersion); + doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, testStorage, acrossUpgradeData, upgradeKafkaVersion); } - private void performUpgrade(BundleVersionModificationData upgradeData, ExtensionContext extensionContext) throws IOException { + private void performUpgrade(String clusterOperatorNamespaceName, String componentsNamespaceName, BundleVersionModificationData upgradeData) throws IOException { final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); // leave empty, so the original Kafka version from appropriate Strimzi's yaml will be used UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(); // Setup env - setupEnvAndUpgradeClusterOperator(upgradeData, testStorage, upgradeKafkaVersion, 
TestConstants.CO_NAMESPACE); + setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, testStorage, upgradeData, upgradeKafkaVersion); // Upgrade CO to HEAD - logPodImages(TestConstants.CO_NAMESPACE); + logClusterOperatorPodImage(clusterOperatorNamespaceName); + logComponentsPodImages(componentsNamespaceName); // Check if UTO is used before changing the CO -> used for check for KafkaTopics - boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector); + boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(componentsNamespaceName, eoSelector); - changeClusterOperator(upgradeData, TestConstants.CO_NAMESPACE); + changeClusterOperator(clusterOperatorNamespaceName, componentsNamespaceName, upgradeData); if (TestKafkaVersion.supportedVersionsContainsVersion(upgradeData.getDefaultKafkaVersionPerStrimzi())) { - waitForKafkaClusterRollingUpdate(); + waitForKafkaClusterRollingUpdate(componentsNamespaceName); } - logPodImages(TestConstants.CO_NAMESPACE); + logClusterOperatorPodImage(clusterOperatorNamespaceName); + logComponentsPodImages(componentsNamespaceName); // Upgrade kafka - changeKafkaAndMetadataVersion(upgradeData, true); - - logPodImages(TestConstants.CO_NAMESPACE); - - checkAllImages(upgradeData, TestConstants.CO_NAMESPACE); + changeKafkaAndMetadataVersion(componentsNamespaceName, upgradeData, true); + logComponentsPodImages(componentsNamespaceName); + checkAllComponentsImages(componentsNamespaceName, upgradeData); // Verify that Pods are stable - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName); + PodUtils.verifyThatRunningPodsAreStable(componentsNamespaceName, clusterName); // Verify upgrade - verifyProcedure(upgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore); + verifyProcedure(componentsNamespaceName, upgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore); } @BeforeEach void setupEnvironment() { NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE); + NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE); } - protected void afterEachMayOverride(ExtensionContext extensionContext) { - cleanUpKafkaTopics(); - ResourceManager.getInstance().deleteResources(); + @AfterEach + void afterEach() { + cleanUpKafkaTopics(TEST_SUITE_NAMESPACE); + deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir); NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE); + NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java index 558f221592f..f5ddea5f743 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java @@ -7,7 +7,6 @@ import io.strimzi.api.kafka.Crds; import io.strimzi.api.kafka.model.kafka.KafkaBuilder; import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.KRaftNotSupported; import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; @@ -47,11 +46,11 @@ public class 
KafkaUpgradeDowngradeST extends AbstractUpgradeST { private static final Logger LOGGER = LogManager.getLogger(KafkaUpgradeDowngradeST.class); - private final int continuousClientsMessageCount = 500; + private final int continuousClientsMessageCount = 300; @IsolatedTest void testKafkaClusterUpgrade() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = 0; x < sortedVersions.size() - 1; x++) { @@ -61,19 +60,19 @@ void testKafkaClusterUpgrade() { // If it is an upgrade test we keep the message format as the lower version number String logMsgFormat = initialVersion.messageVersion(); String interBrokerProtocol = initialVersion.protocolVersion(); - runVersionChange(initialVersion, newVersion, testStorage, logMsgFormat, interBrokerProtocol, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, logMsgFormat, interBrokerProtocol, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @IsolatedTest void testKafkaClusterDowngrade() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = sortedVersions.size() - 1; x > 0; x--) { @@ -83,19 +82,19 @@ void testKafkaClusterDowngrade() { // If it is a downgrade then we make sure to use the lower version number for the message format String logMsgFormat = newVersion.messageVersion(); String interBrokerProtocol = newVersion.protocolVersion(); - runVersionChange(initialVersion, newVersion, testStorage, logMsgFormat, interBrokerProtocol, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, logMsgFormat, interBrokerProtocol, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @IsolatedTest void testKafkaClusterDowngradeToOlderMessageFormat() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); String initLogMsgFormat = sortedVersions.get(0).messageVersion(); @@ -105,38 +104,38 @@ void testKafkaClusterDowngradeToOlderMessageFormat() { TestKafkaVersion initialVersion = sortedVersions.get(x); TestKafkaVersion newVersion = sortedVersions.get(x - 
1); - runVersionChange(initialVersion, newVersion, testStorage, initLogMsgFormat, initInterBrokerProtocol, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, initLogMsgFormat, initInterBrokerProtocol, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @IsolatedTest void testUpgradeWithNoMessageAndProtocolVersionsSet() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = 0; x < sortedVersions.size() - 1; x++) { TestKafkaVersion initialVersion = sortedVersions.get(x); TestKafkaVersion newVersion = sortedVersions.get(x + 1); - runVersionChange(initialVersion, newVersion, testStorage, null, null, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, null, null, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousProducerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousProducerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @IsolatedTest void testUpgradeWithoutLogMessageFormatVersionSet() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE); + final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); List sortedVersions = TestKafkaVersion.getSupportedKafkaVersions(); for (int x = 0; x < sortedVersions.size() - 1; x++) { @@ -145,13 +144,13 @@ void testUpgradeWithoutLogMessageFormatVersionSet() { // If it is an upgrade test we keep the message format as the lower version number String interBrokerProtocol = initialVersion.protocolVersion(); - runVersionChange(initialVersion, newVersion, testStorage, null, interBrokerProtocol, 3, 3); + runVersionChange(testStorage, initialVersion, newVersion, null, interBrokerProtocol, 3, 3); } // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(), continuousClientsMessageCount); // ############################## } @@ -161,15 +160,15 @@ void setupEnvironment() { } @SuppressWarnings({"checkstyle:MethodLength"}) - void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersion, TestStorage testStorage, String initLogMsgFormat, String initInterBrokerProtocol, int kafkaReplicas, int 
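Review note: pre-existing bug carried through in testUpgradeWithNoMessageAndProtocolVersionsSet above: getContinuousProducerName() is passed for both the producer and the consumer argument, so the consumer Job is never actually checked. Suggested fix:

    ClientUtils.waitForClientsSuccess(testStorage.getContinuousProducerName(),
        testStorage.getContinuousConsumerName(), testStorage.getNamespaceName(),
        continuousClientsMessageCount);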
zkReplicas) { + void runVersionChange(TestStorage testStorage, TestKafkaVersion initialVersion, TestKafkaVersion newVersion, String initLogMsgFormat, String initInterBrokerProtocol, int kafkaReplicas, int zkReplicas) { boolean isUpgrade = initialVersion.isUpgrade(newVersion); Map brokerPods; boolean sameMinorVersion = initialVersion.protocolVersion().equals(newVersion.protocolVersion()); - if (KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get() == null) { + if (KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get() == null) { LOGGER.info("Deploying initial Kafka version {} with logMessageFormat={} and interBrokerProtocol={}", initialVersion.version(), initLogMsgFormat, initInterBrokerProtocol); - KafkaBuilder kafka = KafkaTemplates.kafkaPersistent(TestConstants.CO_NAMESPACE, clusterName, kafkaReplicas, zkReplicas) + KafkaBuilder kafka = KafkaTemplates.kafkaPersistent(testStorage.getNamespaceName(), clusterName, kafkaReplicas, zkReplicas) .editSpec() .editKafka() .withVersion(initialVersion.version()) @@ -195,18 +194,19 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi .endKafka() .endSpec(); } - resourceManager.createResourceWithWait(KafkaNodePoolTemplates.brokerPoolPersistentStorage(TestConstants.CO_NAMESPACE, poolName, clusterName, kafkaReplicas).build()); + resourceManager.createResourceWithWait(KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), poolName, clusterName, kafkaReplicas).build()); resourceManager.createResourceWithWait(kafka.build()); // ############################## // Attach clients which will continuously produce/consume messages to/from Kafka brokers during rolling update // ############################## // Setup topic, which has 3 replicas and 2 min.isr to see if producer will be able to work during rolling update - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(TestConstants.CO_NAMESPACE, testStorage.getContinuousTopicName(), clusterName, 3, 3, 2).build()); + resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getContinuousTopicName(), clusterName, 3, 3, 2).build()); String producerAdditionConfiguration = "delivery.timeout.ms=300000\nrequest.timeout.ms=20000"; KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)) + .withNamespaceName(testStorage.getNamespaceName()) .withMessageCount(continuousClientsMessageCount) .withAdditionalConfig(producerAdditionConfiguration) .build(); @@ -217,7 +217,7 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi } else { LOGGER.info("Initial Kafka version (" + initialVersion.version() + ") is already ready"); - brokerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector); + brokerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector); // Wait for log.message.format.version and inter.broker.protocol.version change if (!sameMinorVersion @@ -232,65 +232,65 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi config.put("inter.broker.protocol.version", newVersion.protocolVersion()); kafka.getSpec().getKafka().setConfig(config); LOGGER.info("Kafka config after updating '{}'", kafka.getSpec().getKafka().getConfig().toString()); - }, TestConstants.CO_NAMESPACE); + }, testStorage.getNamespaceName()); - 
RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, kafkaReplicas, brokerPods); + RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), brokerSelector, kafkaReplicas, brokerPods); } } LOGGER.info("Deployment of initial Kafka version (" + initialVersion.version() + ") complete"); String zkVersionCommand = "ls libs | grep -Po 'zookeeper-\\K\\d+.\\d+.\\d+' | head -1"; - String zkResult = cmdKubeClient().execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), + String zkResult = cmdKubeClient(testStorage.getNamespaceName()).execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "/bin/bash", "-c", zkVersionCommand).out().trim(); LOGGER.info("Pre-change ZooKeeper version query returned: " + zkResult); - String kafkaVersionResult = KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get().getStatus().getKafkaVersion(); + String kafkaVersionResult = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getStatus().getKafkaVersion(); LOGGER.info("Pre-change Kafka version: " + kafkaVersionResult); - Map controllerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, controllerSelector); - brokerPods = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector); + Map controllerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector); + brokerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector); LOGGER.info("Updating Kafka CR version field to " + newVersion.version()); // Change the version in Kafka CR KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> { kafka.getSpec().getKafka().setVersion(newVersion.version()); - }, TestConstants.CO_NAMESPACE); + }, testStorage.getNamespaceName()); LOGGER.info("Waiting for readiness of new Kafka version (" + newVersion.version() + ") to complete"); // Wait for the zk version change roll - controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, zkReplicas, controllerPods); + RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, zkReplicas, controllerPods); LOGGER.info("1st ZooKeeper roll (image change) is complete"); // Wait for the kafka broker version change roll - brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, brokerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerPods); LOGGER.info("1st Kafka roll (image change) is complete"); - Object currentLogMessageFormat = KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("log.message.format.version"); - Object currentInterBrokerProtocol = KafkaResource.kafkaClient().inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName).get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version"); + Object currentLogMessageFormat = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getSpec().getKafka().getConfig().get("log.message.format.version"); + Object currentInterBrokerProtocol = KafkaResource.kafkaClient().inNamespace(testStorage.getNamespaceName()).withName(clusterName).get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version"); if (isUpgrade && 
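Review note: the ZooKeeper probe above is terse enough to merit a comment: grep -Po with \K discards the matched "zookeeper-" prefix, so only the dotted version from the jar name is printed. As used here (the printed value, e.g. "3.8.3", is illustrative):

    // \K resets the match start after "zookeeper-", leaving only the version digits.
    String zkVersionCommand = "ls libs | grep -Po 'zookeeper-\\K\\d+.\\d+.\\d+' | head -1";
    String zkResult = cmdKubeClient(testStorage.getNamespaceName())
        .execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0),
            "zookeeper", "/bin/bash", "-c", zkVersionCommand).out().trim();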
!sameMinorVersion) { LOGGER.info("Kafka version is increased, two RUs remaining for increasing IBPV and LMFV"); if (currentInterBrokerProtocol == null) { - brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, brokerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerPods); LOGGER.info("Kafka roll (inter.broker.protocol.version) is complete"); } // Only Kafka versions before 3.0.0 require the second roll if (currentLogMessageFormat == null && TestKafkaVersion.compareDottedVersions(newVersion.protocolVersion(), "3.0") < 0) { - brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, kafkaReplicas, brokerPods); + brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, kafkaReplicas, brokerPods); LOGGER.info("Kafka roll (log.message.format.version) is complete"); } } LOGGER.info("Deployment of Kafka (" + newVersion.version() + ") complete"); - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, KafkaResources.kafkaComponentName(clusterName)); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaResources.kafkaComponentName(clusterName)); // Extract the zookeeper version number from the jars in the lib directory - zkResult = cmdKubeClient().execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), + zkResult = cmdKubeClient(testStorage.getNamespaceName()).execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper", "/bin/bash", "-c", zkVersionCommand).out().trim(); LOGGER.info("Post-change ZooKeeper version query returned: " + zkResult); @@ -298,8 +298,8 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi " was expected", zkResult, is(newVersion.zookeeperVersion())); // Extract the Kafka version number from the jars in the lib directory - String brokerPodName = kubeClient().listPods(TestConstants.CO_NAMESPACE, brokerSelector).get(0).getMetadata().getName(); - kafkaVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(brokerPodName); + String brokerPodName = kubeClient().listPods(testStorage.getNamespaceName(), brokerSelector).get(0).getMetadata().getName(); + kafkaVersionResult = KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), brokerPodName); LOGGER.info("Post-change Kafka version query returned: " + kafkaVersionResult); assertThat("Kafka container had version " + kafkaVersionResult + " where " + newVersion.version() + @@ -316,39 +316,39 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi config.put("inter.broker.protocol.version", newVersion.protocolVersion()); kafka.getSpec().getKafka().setConfig(config); LOGGER.info("Kafka config after updating '{}'", kafka.getSpec().getKafka().getConfig().toString()); - }, TestConstants.CO_NAMESPACE); + }, testStorage.getNamespaceName()); if (currentLogMessageFormat != null || currentInterBrokerProtocol != null) { LOGGER.info("Change of configuration is done manually - rolling update"); // Wait for the kafka broker version of log.message.format.version change roll - RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, kafkaReplicas, brokerPods); + RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, kafkaReplicas, brokerPods); LOGGER.info("Kafka roll 
(log.message.format.version change) is complete"); } else { LOGGER.info("Cluster Operator already changed the configuration, there should be no rolling update"); - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, KafkaResources.kafkaComponentName(clusterName)); - assertFalse(RollingUpdateUtils.componentHasRolled(TestConstants.CO_NAMESPACE, brokerSelector, brokerPods)); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaResources.kafkaComponentName(clusterName)); + assertFalse(RollingUpdateUtils.componentHasRolled(testStorage.getNamespaceName(), brokerSelector, brokerPods)); } } if (!isUpgrade) { LOGGER.info("Verifying that log.message.format attribute updated correctly to version {}", initLogMsgFormat); - assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName) + assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName) .get().getSpec().getKafka().getConfig().get("log.message.format.version"), is(initLogMsgFormat)); LOGGER.info("Verifying that inter.broker.protocol.version attribute updated correctly to version {}", initInterBrokerProtocol); - assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName) + assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName) .get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version"), is(initInterBrokerProtocol)); } else { if (currentLogMessageFormat != null && currentInterBrokerProtocol != null) { LOGGER.info("Verifying that log.message.format attribute updated correctly to version {}", newVersion.messageVersion()); - assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName) + assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName) .get().getSpec().getKafka().getConfig().get("log.message.format.version"), is(newVersion.messageVersion())); LOGGER.info("Verifying that inter.broker.protocol.version attribute updated correctly to version {}", newVersion.protocolVersion()); - assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(TestConstants.CO_NAMESPACE).withName(clusterName) + assertThat(Crds.kafkaOperation(kubeClient().getClient()).inNamespace(testStorage.getNamespaceName()).withName(clusterName) .get().getSpec().getKafka().getConfig().get("inter.broker.protocol.version"), is(newVersion.protocolVersion())); } } - LOGGER.info("Waiting till Kafka Cluster {}/{} with specified version {} has the same version in status and specification", TestConstants.CO_NAMESPACE, clusterName, newVersion.version()); - KafkaUtils.waitUntilStatusKafkaVersionMatchesExpectedVersion(clusterName, TestConstants.CO_NAMESPACE, newVersion.version()); + LOGGER.info("Waiting till Kafka Cluster {}/{} with specified version {} has the same version in status and specification", testStorage.getNamespaceName(), clusterName, newVersion.version()); + KafkaUtils.waitUntilStatusKafkaVersionMatchesExpectedVersion(testStorage.getNamespaceName(), clusterName, newVersion.version()); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java index f74c158eef7..c7b7ea8d59d 100644 --- 
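[Editor's note] The hunks above thread testStorage.getNamespaceName() through runVersionChange, so the number of extra broker rolls depends only on the version pair, not on a fixed namespace. A hedged sketch of that decision logic restated as a standalone helper; countExtraRolls is hypothetical, while isUpgrade, protocolVersion and compareDottedVersions are the calls used in the diff:

    // Hypothetical helper restating the roll-count logic from runVersionChange.
    static int countExtraRolls(TestKafkaVersion initial, TestKafkaVersion target,
                               Object currentIbp, Object currentLmf) {
        boolean sameMinor = initial.protocolVersion().equals(target.protocolVersion());
        if (!initial.isUpgrade(target) || sameMinor) {
            return 0; // downgrades and same-minor upgrades roll only for the image change
        }
        int rolls = 0;
        if (currentIbp == null) {
            rolls++; // one roll when inter.broker.protocol.version is bumped
        }
        if (currentLmf == null && TestKafkaVersion.compareDottedVersions(target.protocolVersion(), "3.0") < 0) {
            rolls++; // log.message.format.version only matters before Kafka 3.0
        }
        return rolls;
    }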
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java
index f74c158eef7..c7b7ea8d59d 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java
@@ -9,7 +9,7 @@
 import io.strimzi.api.kafka.model.topic.KafkaTopic;
 import io.strimzi.api.kafka.model.topic.KafkaTopicBuilder;
 import io.strimzi.systemtest.Environment;
-import io.strimzi.systemtest.TestConstants;
+import io.strimzi.systemtest.annotations.IsolatedTest;
 import io.strimzi.systemtest.annotations.KRaftNotSupported;
 import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients;
 import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder;
@@ -30,7 +30,6 @@
 import org.apache.logging.log4j.Logger;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
@@ -55,9 +54,9 @@ public class OlmUpgradeST extends AbstractUpgradeST {
     private static final Logger LOGGER = LogManager.getLogger(OlmUpgradeST.class);
     private final OlmVersionModificationData olmUpgradeData = new VersionModificationDataLoader(ModificationType.OLM_UPGRADE).getOlmUpgradeData();
 
-    @Test
+    @IsolatedTest
     void testStrimziUpgrade() throws IOException {
-        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
+        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), CO_NAMESPACE);
         final String toVersion = olmUpgradeData.getToVersion();
         final String fromVersion = olmUpgradeData.getFromVersion();
 
@@ -77,9 +76,9 @@ void testStrimziUpgrade() throws IOException {
         File dir = FileUtils.downloadAndUnzip(olmUpgradeData.getFromUrl());
         File kafkaYaml = new File(dir, olmUpgradeData.getFromExamples() + "/examples/kafka/kafka-persistent.yaml");
-        LOGGER.info("Deploying Kafka from file: {}", kafkaYaml.getPath());
-        KubeClusterResource.cmdKubeClient().create(kafkaYaml);
-        waitForReadinessOfKafkaCluster();
+        LOGGER.info("Deploying Kafka in Namespace: {} from file: {}", CO_NAMESPACE, kafkaYaml.getPath());
+        KubeClusterResource.cmdKubeClient(CO_NAMESPACE).create(kafkaYaml);
+        waitForReadinessOfKafkaCluster(CO_NAMESPACE);
 
         // Create KafkaTopic
         final String topicUpgradeName = "topic-upgrade";
@@ -89,7 +88,7 @@ void testStrimziUpgrade() throws IOException {
         KafkaTopic kafkaUpgradeTopic = new YAMLMapper().readValue(new File(dir, olmUpgradeData.getFromExamples() + "/examples/topic/kafka-topic.yaml"), KafkaTopic.class);
         kafkaUpgradeTopic = new KafkaTopicBuilder(kafkaUpgradeTopic)
             .editMetadata()
-                .withNamespace(TestConstants.CO_NAMESPACE)
+                .withNamespace(CO_NAMESPACE)
                 .withName(topicUpgradeName)
             .endMetadata()
             .editSpec()
@@ -103,7 +102,7 @@ void testStrimziUpgrade() throws IOException {
         KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder()
             .withProducerName(testStorage.getProducerName())
             .withConsumerName(testStorage.getConsumerName())
-            .withNamespaceName(TestConstants.CO_NAMESPACE)
+            .withNamespaceName(CO_NAMESPACE)
             .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
             .withTopicName(topicUpgradeName)
             .withMessageCount(testStorage.getMessageCount())
@@ -112,11 +111,11 @@ void testStrimziUpgrade() throws IOException {
 
         resourceManager.createResourceWithWait(kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi());
 
-        String clusterOperatorDeploymentName = ResourceManager.kubeClient().getDeploymentNameByPrefix(Environment.OLM_OPERATOR_DEPLOYMENT_NAME);
+        String clusterOperatorDeploymentName = ResourceManager.kubeClient().namespace(CO_NAMESPACE).getDeploymentNameByPrefix(Environment.OLM_OPERATOR_DEPLOYMENT_NAME);
         LOGGER.info("Old deployment name of Cluster Operator is {}", clusterOperatorDeploymentName);
 
         // ======== Cluster Operator upgrade starts ========
-        makeSnapshots();
+        makeComponentsSnapshots(CO_NAMESPACE);
 
         OlmConfiguration upgradeOlmConfig = new OlmConfigurationBuilder(clusterOperator.getOlmResource().getOlmConfiguration())
             .withChannelName("stable")
@@ -124,30 +123,30 @@ void testStrimziUpgrade() throws IOException {
             .build();
 
         // Cluster Operator upgrade
-        clusterOperator.upgradeClusterOperator(upgradeOlmConfig);
+        clusterOperator.upgradeClusterOperator(CO_NAMESPACE, upgradeOlmConfig);
 
-        clusterOperatorDeploymentName = ResourceManager.kubeClient().getDeploymentNameByPrefix(Environment.OLM_OPERATOR_DEPLOYMENT_NAME);
+        clusterOperatorDeploymentName = ResourceManager.kubeClient().namespace(CO_NAMESPACE).getDeploymentNameByPrefix(Environment.OLM_OPERATOR_DEPLOYMENT_NAME);
         LOGGER.info("New deployment name of Cluster Operator is {}", clusterOperatorDeploymentName);
         ResourceManager.setCoDeploymentName(clusterOperatorDeploymentName);
 
         // Verification that Cluster Operator has been upgraded to a correct version
-        String afterUpgradeVersionOfCo = kubeClient().getCsvWithPrefix(TestConstants.CO_NAMESPACE, upgradeOlmConfig.getOlmAppBundlePrefix()).getSpec().getVersion();
+        String afterUpgradeVersionOfCo = kubeClient().getCsvWithPrefix(CO_NAMESPACE, upgradeOlmConfig.getOlmAppBundlePrefix()).getSpec().getVersion();
         assertThat(afterUpgradeVersionOfCo, is(toVersion));
 
         // Wait for Rolling Update to finish
-        controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerPods);
-        brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods);
-        eoPods = DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
+        controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(CO_NAMESPACE, controllerSelector, 3, controllerPods);
+        brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(CO_NAMESPACE, brokerSelector, 3, brokerPods);
+        eoPods = DeploymentUtils.waitTillDepHasRolled(CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
         // ======== Cluster Operator upgrade ends ========
 
         // ======== Kafka upgrade starts ========
-        logPodImages(TestConstants.CO_NAMESPACE);
-        changeKafkaAndLogFormatVersion(olmUpgradeData);
-        logPodImages(TestConstants.CO_NAMESPACE);
+        logComponentsPodImages(CO_NAMESPACE);
+        changeKafkaAndLogFormatVersion(CO_NAMESPACE, olmUpgradeData);
+        logComponentsPodImages(CO_NAMESPACE);
         // ======== Kafka upgrade ends ========
 
         // Wait for messages of previously created clients
-        ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), TestConstants.CO_NAMESPACE, testStorage.getMessageCount());
+        ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), CO_NAMESPACE, testStorage.getMessageCount());
     }
 
     @BeforeAll
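[Editor's note] Every OLM lookup in this test is now pinned to CO_NAMESPACE. Condensed, the upgrade call sequence reads roughly as follows; this is a sketch assembled from the hunks above, not a new API, and builder options elided between hunks are intentionally omitted:

    // Scope the client to the operator namespace before resolving the Deployment name.
    String coDeploymentName = ResourceManager.kubeClient().namespace(CO_NAMESPACE)
        .getDeploymentNameByPrefix(Environment.OLM_OPERATOR_DEPLOYMENT_NAME);

    OlmConfiguration upgradeOlmConfig = new OlmConfigurationBuilder(clusterOperator.getOlmResource().getOlmConfiguration())
        .withChannelName("stable")
        .build();

    // The target namespace is now an explicit argument instead of an implicit default.
    clusterOperator.upgradeClusterOperator(CO_NAMESPACE, upgradeOlmConfig);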
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java
index d0b0f5df949..895ea3ff91c 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java
@@ -4,7 +4,7 @@
  */
 package io.strimzi.systemtest.upgrade.regular;
 
-import io.strimzi.systemtest.TestConstants;
+import io.strimzi.systemtest.annotations.IsolatedTest;
 import io.strimzi.systemtest.annotations.KRaftNotSupported;
 import io.strimzi.systemtest.annotations.KindIPv6NotSupported;
 import io.strimzi.systemtest.annotations.MicroShiftNotSupported;
@@ -22,13 +22,13 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
 import java.io.IOException;
 import java.util.List;
 
+import static io.strimzi.systemtest.Environment.TEST_SUITE_NAMESPACE;
 import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE;
 import static io.strimzi.systemtest.TestConstants.UPGRADE;
 import static org.junit.jupiter.api.Assumptions.assumeTrue;
@@ -48,63 +48,66 @@ public class StrimziDowngradeST extends AbstractUpgradeST {
     @ParameterizedTest(name = "testDowngradeStrimziVersion-{0}-{1}")
     @MethodSource("io.strimzi.systemtest.upgrade.VersionModificationDataLoader#loadYamlDowngradeData")
     void testDowngradeStrimziVersion(String from, String to, BundleVersionModificationData parameters) throws Exception {
+        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
         assumeTrue(StUtils.isAllowOnCurrentEnvironment(parameters.getEnvFlakyVariable()));
         assumeTrue(StUtils.isAllowedOnCurrentK8sVersion(parameters.getEnvMaxK8sVersion()));
 
         LOGGER.debug("Running downgrade test from version {} to {}", from, to);
-        performDowngrade(parameters);
+        performDowngrade(CO_NAMESPACE, testStorage, parameters);
     }
 
     @MicroShiftNotSupported("Due to lack of Kafka Connect build feature")
     @KindIPv6NotSupported("Our current CI setup doesn't allow pushing into internal registries that is needed in this test")
-    @Test
+    @IsolatedTest
     void testDowngradeOfKafkaConnectAndKafkaConnector() throws IOException {
-        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE);
+        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
         final BundleVersionModificationData bundleDowngradeDataWithFeatureGates = bundleDowngradeMetadata.stream()
             .filter(bundleMetadata -> bundleMetadata.getFeatureGatesBefore() != null && !bundleMetadata.getFeatureGatesBefore().isEmpty() ||
                 bundleMetadata.getFeatureGatesAfter() != null && !bundleMetadata.getFeatureGatesAfter().isEmpty()).toList().get(0);
         UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(bundleDowngradeDataWithFeatureGates.getDeployKafkaVersion());
 
-        doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(bundleDowngradeDataWithFeatureGates, testStorage, upgradeKafkaVersion);
+        doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, testStorage, bundleDowngradeDataWithFeatureGates, upgradeKafkaVersion);
     }
 
-    @SuppressWarnings("MethodLength")
-    private void performDowngrade(BundleVersionModificationData downgradeData) throws IOException {
-        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
+    private void performDowngrade(String clusterOperatorNamespaceName, TestStorage testStorage, BundleVersionModificationData downgradeData) throws IOException {
         UpgradeKafkaVersion testUpgradeKafkaVersion = UpgradeKafkaVersion.getKafkaWithVersionFromUrl(downgradeData.getFromKafkaVersionsUrl(), downgradeData.getDeployKafkaVersion());
 
         // Setup env
         // We support downgrade only when you didn't upgrade to new inter.broker.protocol.version and log.message.format.version
         // https://strimzi.io/docs/operators/latest/full/deploying.html#con-target-downgrade-version-str
-        setupEnvAndUpgradeClusterOperator(downgradeData, testStorage, testUpgradeKafkaVersion, TestConstants.CO_NAMESPACE);
-        logPodImages(TestConstants.CO_NAMESPACE);
+        setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, testStorage, downgradeData, testUpgradeKafkaVersion);
+
+        logClusterOperatorPodImage(clusterOperatorNamespaceName);
+        logComponentsPodImages(testStorage.getNamespaceName());
 
         // Check if UTO is used before changing the CO -> used for check for KafkaTopics
-        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector);
+        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector);
 
         // Downgrade CO
-        changeClusterOperator(downgradeData, TestConstants.CO_NAMESPACE);
+        changeClusterOperator(clusterOperatorNamespaceName, testStorage.getNamespaceName(), downgradeData);
 
         // Wait for Kafka cluster rolling update
-        waitForKafkaClusterRollingUpdate();
-        logPodImages(TestConstants.CO_NAMESPACE);
+        waitForKafkaClusterRollingUpdate(testStorage.getNamespaceName());
+        logComponentsPodImages(testStorage.getNamespaceName());
 
         // Downgrade kafka
-        changeKafkaAndLogFormatVersion(downgradeData);
+        changeKafkaAndLogFormatVersion(testStorage.getNamespaceName(), downgradeData);
 
         // Verify that pods are stable
-        PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName);
-        checkAllImages(downgradeData, TestConstants.CO_NAMESPACE);
+        PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
+        checkAllComponentsImages(testStorage.getNamespaceName(), downgradeData);
 
         // Verify upgrade
-        verifyProcedure(downgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore);
+        verifyProcedure(testStorage.getNamespaceName(), downgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
     }
 
     @BeforeEach
     void setupEnvironment() {
         NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE);
+        NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE);
     }
 
     @AfterEach
     void afterEach() {
-        cleanUpKafkaTopics();
-        deleteInstalledYamls(coDir, TestConstants.CO_NAMESPACE);
+        cleanUpKafkaTopics(TEST_SUITE_NAMESPACE);
+        deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir);
         NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE);
+        NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE);
     }
 }
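[Editor's note] The downgrade suite now separates the operator namespace from the component namespace. The snippet below restates how the two are wired; the assumption that a TestStorage built without an explicit namespace resolves to Environment.TEST_SUITE_NAMESPACE is mine, inferred from the new import and the @BeforeEach/@AfterEach pair above:

    // Assumed default: a no-arg TestStorage points at TEST_SUITE_NAMESPACE.
    final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());

    // CO_NAMESPACE hosts the Cluster Operator bundle;
    // testStorage.getNamespaceName() hosts Kafka, topics, and clients.
    performDowngrade(CO_NAMESPACE, testStorage, parameters);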
diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java
index e067dfe6685..77e0d64e63f 100644
--- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java
+++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java
@@ -5,7 +5,7 @@
 package io.strimzi.systemtest.upgrade.regular;
 
 import io.strimzi.api.kafka.model.kafka.KafkaResources;
-import io.strimzi.systemtest.TestConstants;
+import io.strimzi.systemtest.annotations.IsolatedTest;
 import io.strimzi.systemtest.annotations.KRaftNotSupported;
 import io.strimzi.systemtest.annotations.KindIPv6NotSupported;
 import io.strimzi.systemtest.annotations.MicroShiftNotSupported;
@@ -25,15 +25,16 @@
 import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
 
 import java.io.IOException;
 import java.util.Map;
 
+import static io.strimzi.systemtest.Environment.TEST_SUITE_NAMESPACE;
 import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE;
 import static io.strimzi.systemtest.TestConstants.UPGRADE;
 import static org.hamcrest.CoreMatchers.containsString;
@@ -55,15 +56,16 @@ public class StrimziUpgradeST extends AbstractUpgradeST {
     @ParameterizedTest(name = "from: {0} (using FG <{2}>) to: {1} (using FG <{3}>) ")
     @MethodSource("io.strimzi.systemtest.upgrade.VersionModificationDataLoader#loadYamlUpgradeData")
     void testUpgradeStrimziVersion(String fromVersion, String toVersion, String fgBefore, String fgAfter, BundleVersionModificationData upgradeData) throws Exception {
+        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
         assumeTrue(StUtils.isAllowOnCurrentEnvironment(upgradeData.getEnvFlakyVariable()));
         assumeTrue(StUtils.isAllowedOnCurrentK8sVersion(upgradeData.getEnvMaxK8sVersion()));
 
         LOGGER.debug("Running upgrade test from version {} to {} (FG: {} -> {})", fromVersion, toVersion, fgBefore, fgAfter);
-        performUpgrade(upgradeData);
+        performUpgrade(CO_NAMESPACE, testStorage, upgradeData);
     }
 
-    @Test
+    @IsolatedTest
     void testUpgradeKafkaWithoutVersion() throws IOException {
         UpgradeKafkaVersion upgradeKafkaVersion = UpgradeKafkaVersion.getKafkaWithVersionFromUrl(acrossUpgradeData.getFromKafkaVersionsUrl(), acrossUpgradeData.getStartingKafkaVersion());
         upgradeKafkaVersion.setVersion(null);
@@ -71,142 +73,143 @@ void testUpgradeKafkaWithoutVersion() throws IOException {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
         // Setup env
-        setupEnvAndUpgradeClusterOperator(acrossUpgradeData, testStorage, upgradeKafkaVersion, TestConstants.CO_NAMESPACE);
+        setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, testStorage, acrossUpgradeData, upgradeKafkaVersion);
 
-        Map<String, String> zooSnapshot = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, controllerSelector);
-        Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(TestConstants.CO_NAMESPACE, brokerSelector);
-        Map<String, String> eoSnapshot = DeploymentUtils.depSnapshot(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName));
+        final Map<String, String> zooSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector);
+        final Map<String, String> kafkaSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector);
+        final Map<String, String> eoSnapshot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(clusterName));
 
         // Make snapshots of all Pods
-        makeSnapshots();
+        makeComponentsSnapshots(testStorage.getNamespaceName());
 
         // Check if UTO is used before changing the CO -> used for check for KafkaTopics
-        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector);
+        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector);
 
         // Upgrade CO
-        changeClusterOperator(acrossUpgradeData, TestConstants.CO_NAMESPACE);
+        changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData);
 
-        logPodImages(TestConstants.CO_NAMESPACE);
+        logPodImages(CO_NAMESPACE);
 
-        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, zooSnapshot);
-        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, kafkaSnapshot);
-        DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoSnapshot);
+        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, 3, zooSnapshot);
+        RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, 3, kafkaSnapshot);
+        DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoSnapshot);
 
-        logPodImages(TestConstants.CO_NAMESPACE);
-        checkAllImages(acrossUpgradeData, TestConstants.CO_NAMESPACE);
+        logPodImages(CO_NAMESPACE);
+        checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData);
 
         // Verify that Pods are stable
-        PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName);
+        PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
 
         // Verify upgrade
-        verifyProcedure(acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore);
-        assertThat(KafkaUtils.getVersionFromKafkaPodLibs(KafkaResources.kafkaPodName(clusterName, 0)), containsString(acrossUpgradeData.getProcedures().getVersion()));
+        verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
+        assertThat(KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), KafkaResources.kafkaPodName(clusterName, 0)), containsString(acrossUpgradeData.getProcedures().getVersion()));
     }
 
-    @Test
+    @IsolatedTest
     void testUpgradeAcrossVersionsWithUnsupportedKafkaVersion() throws IOException {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
         UpgradeKafkaVersion upgradeKafkaVersion = UpgradeKafkaVersion.getKafkaWithVersionFromUrl(acrossUpgradeData.getFromKafkaVersionsUrl(), acrossUpgradeData.getStartingKafkaVersion());
 
         // Setup env
-        setupEnvAndUpgradeClusterOperator(acrossUpgradeData, testStorage, upgradeKafkaVersion, TestConstants.CO_NAMESPACE);
+        setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, testStorage, acrossUpgradeData, upgradeKafkaVersion);
 
         // Make snapshots of all Pods
-        makeSnapshots();
+        makeComponentsSnapshots(TEST_SUITE_NAMESPACE);
 
         // Check if UTO is used before changing the CO -> used for check for KafkaTopics
-        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector);
+        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TEST_SUITE_NAMESPACE, eoSelector);
 
         // Upgrade CO
-        changeClusterOperator(acrossUpgradeData, TestConstants.CO_NAMESPACE);
-        logPodImages(TestConstants.CO_NAMESPACE);
+        changeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData);
+        logPodImages(CO_NAMESPACE);
 
         // Upgrade kafka
-        changeKafkaAndLogFormatVersion(acrossUpgradeData);
-        logPodImages(TestConstants.CO_NAMESPACE);
-        checkAllImages(acrossUpgradeData, TestConstants.CO_NAMESPACE);
+        changeKafkaAndLogFormatVersion(TEST_SUITE_NAMESPACE, acrossUpgradeData);
+        logPodImages(TEST_SUITE_NAMESPACE);
+        checkAllComponentsImages(TEST_SUITE_NAMESPACE, acrossUpgradeData);
 
         // Verify that Pods are stable
-        PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName);
+        PodUtils.verifyThatRunningPodsAreStable(TEST_SUITE_NAMESPACE, clusterName);
 
         // Verify upgrade
-        verifyProcedure(acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore);
+        verifyProcedure(TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
     }
 
-    @Test
+    @IsolatedTest
     void testUpgradeAcrossVersionsWithNoKafkaVersion() throws IOException {
         final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
 
         // Setup env
-        setupEnvAndUpgradeClusterOperator(acrossUpgradeData, testStorage, null, TestConstants.CO_NAMESPACE);
+        setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, testStorage, acrossUpgradeData, null);
 
         // Check if UTO is used before changing the CO -> used for check for KafkaTopics
-        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector);
+        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TEST_SUITE_NAMESPACE, eoSelector);
 
         // Upgrade CO
-        changeClusterOperator(acrossUpgradeData, TestConstants.CO_NAMESPACE);
+        changeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData);
 
         // Wait till first upgrade finished
-        controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, controllerSelector, 3, controllerPods);
-        brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.CO_NAMESPACE, brokerSelector, 3, brokerPods);
-        eoPods = DeploymentUtils.waitTillDepHasRolled(TestConstants.CO_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
+        controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TEST_SUITE_NAMESPACE, controllerSelector, 3, controllerPods);
+        brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TEST_SUITE_NAMESPACE, brokerSelector, 3, brokerPods);
+        eoPods = DeploymentUtils.waitTillDepHasRolled(TEST_SUITE_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
 
         LOGGER.info("Rolling to new images has finished!");
-        logPodImages(TestConstants.CO_NAMESPACE);
+        logPodImages(CO_NAMESPACE);
 
         // Upgrade kafka
-        changeKafkaAndLogFormatVersion(acrossUpgradeData);
-        logPodImages(TestConstants.CO_NAMESPACE);
-        checkAllImages(acrossUpgradeData, TestConstants.CO_NAMESPACE);
+        changeKafkaAndLogFormatVersion(testStorage.getNamespaceName(), acrossUpgradeData);
+        logPodImages(CO_NAMESPACE);
+        checkAllComponentsImages(TEST_SUITE_NAMESPACE, acrossUpgradeData);
 
         // Verify that Pods are stable
-        PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName);
+        PodUtils.verifyThatRunningPodsAreStable(TEST_SUITE_NAMESPACE, clusterName);
 
         // Verify upgrade
-        verifyProcedure(acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore);
+        verifyProcedure(TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
     }
 
     @MicroShiftNotSupported("Due to lack of Kafka Connect build feature")
     @KindIPv6NotSupported("Our current CI setup doesn't allow pushing into internal registries that is needed in this test")
-    @Test
+    @IsolatedTest
     void testUpgradeOfKafkaConnectAndKafkaConnector() throws IOException {
-        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext(), TestConstants.CO_NAMESPACE);
+        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
         final UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(acrossUpgradeData.getDefaultKafka());
 
-        doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(acrossUpgradeData, testStorage, upgradeKafkaVersion);
+        doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, testStorage, acrossUpgradeData, upgradeKafkaVersion);
     }
 
-    private void performUpgrade(BundleVersionModificationData upgradeData) throws IOException {
-        final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
+    private void performUpgrade(String clusterOperatorNamespaceName, TestStorage testStorage, BundleVersionModificationData upgradeData) throws IOException {
         // leave empty, so the original Kafka version from appropriate Strimzi's yaml will be used
         UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion();
 
         // Setup env
-        setupEnvAndUpgradeClusterOperator(upgradeData, testStorage, upgradeKafkaVersion, TestConstants.CO_NAMESPACE);
-
-        // Upgrade CO to HEAD
-        logPodImages(TestConstants.CO_NAMESPACE);
+        setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, testStorage, upgradeData, upgradeKafkaVersion);
+        logClusterOperatorPodImage(clusterOperatorNamespaceName);
 
         // Check if UTO is used before changing the CO -> used for check for KafkaTopics
-        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TestConstants.CO_NAMESPACE, eoSelector);
+        boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector);
 
-        changeClusterOperator(upgradeData, TestConstants.CO_NAMESPACE);
+        // Upgrade CO to HEAD
+        changeClusterOperator(clusterOperatorNamespaceName, testStorage.getNamespaceName(), upgradeData);
 
         if (TestKafkaVersion.supportedVersionsContainsVersion(upgradeData.getDefaultKafkaVersionPerStrimzi())) {
-            waitForKafkaClusterRollingUpdate();
+            waitForKafkaClusterRollingUpdate(testStorage.getNamespaceName());
        }
 
-        logPodImages(TestConstants.CO_NAMESPACE);
+        logComponentsPodImagesWithConnect(testStorage.getNamespaceName());
 
         // Upgrade kafka
-        changeKafkaAndLogFormatVersion(upgradeData);
-        logPodImages(TestConstants.CO_NAMESPACE);
-        checkAllImages(upgradeData, TestConstants.CO_NAMESPACE);
+        changeKafkaAndLogFormatVersion(testStorage.getNamespaceName(), upgradeData);
+        logComponentsPodImagesWithConnect(testStorage.getNamespaceName());
+        checkAllComponentsImages(testStorage.getNamespaceName(), upgradeData);
 
         // Verify that Pods are stable
-        PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, clusterName);
+        PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
 
         // Verify upgrade
-        verifyProcedure(upgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), TestConstants.CO_NAMESPACE, wasUTOUsedBefore);
+        verifyProcedure(testStorage.getNamespaceName(), upgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
     }
 
     @BeforeEach
     void setupEnvironment() {
         NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE);
+        NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE);
     }
 
-    protected void afterEachMayOverride() {
-        cleanUpKafkaTopics();
-        ResourceManager.getInstance().deleteResources();
+    @AfterEach
+    void afterEach() {
+        cleanUpKafkaTopics(TEST_SUITE_NAMESPACE);
+        deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir);
         NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE);
+        NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE);
     }
 }
diff --git a/systemtest/src/test/resources/upgrade/BundleDowngrade.yaml b/systemtest/src/test/resources/upgrade/BundleDowngrade.yaml
index 6640aa0cd81..31a6b5fcddf 100644
--- a/systemtest/src/test/resources/upgrade/BundleDowngrade.yaml
+++ b/systemtest/src/test/resources/upgrade/BundleDowngrade.yaml
@@ -44,7 +44,7 @@
 userOperator: strimzi/operator:0.43.0
 deployKafkaVersion: 3.8.0
 client:
-  continuousClientsMessages: 500
+  continuousClientsMessages: 300
 environmentInfo:
   maxK8sVersion: latest
 status: stable
@@ -72,7 +72,7 @@
 userOperator: strimzi/operator:0.43.0
 deployKafkaVersion: 3.8.0
 client:
-  continuousClientsMessages: 500
+  continuousClientsMessages: 300
 environmentInfo:
   maxK8sVersion: latest
 status: stable
diff --git a/systemtest/src/test/resources/upgrade/BundleUpgrade.yaml b/systemtest/src/test/resources/upgrade/BundleUpgrade.yaml
index f3363fae5a1..79c4c6f81bb 100644
--- a/systemtest/src/test/resources/upgrade/BundleUpgrade.yaml
+++ b/systemtest/src/test/resources/upgrade/BundleUpgrade.yaml
@@ -41,7 +41,7 @@
 topicOperator: strimzi/operator:latest
 userOperator: strimzi/operator:latest
 client:
-  continuousClientsMessages: 500
+  continuousClientsMessages: 300
 environmentInfo:
   maxK8sVersion: latest
 status: stable
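[Editor's note] Both bundle files trim continuousClientsMessages from 500 to 300, which shortens how long the continuous producer/consumer pair has to keep running across the upgrade or downgrade. A hedged sketch of where that value lands, assuming VersionModificationDataLoader feeds it into continuousClientsMessageCount (that linkage is my inference; the builder calls are the ones used in runVersionChange above):

    KafkaClients continuousClients = ClientUtils.getContinuousPlainClientBuilder(testStorage)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName))
        .withNamespaceName(testStorage.getNamespaceName())
        .withMessageCount(continuousClientsMessageCount) // now 300 instead of 500
        .build();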