From cecbe0e82d0dd63912abb9a22fddc433fa431f25 Mon Sep 17 00:00:00 2001
From: Berto D'Attoma <88311595+bdattoma@users.noreply.github.com>
Date: Mon, 18 Mar 2024 18:15:23 +0100
Subject: [PATCH 1/8] Adding link to ticket bugs in some tests (#1296)

* adding link to prod bugs

* add ref to bugs

* add flaky test tags

(cherry picked from commit edafbcc301fcb40a1227fb4e0c3ab05edcebe860)
---
 .../420__model_serving/420__model_serving.robot             | 3 ++-
 .../420__model_serving/LLMs/422__model_serving_llm_UI.robot | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/420__model_serving.robot b/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/420__model_serving.robot
index 420d5ef36..d3c4061c3 100644
--- a/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/420__model_serving.robot
+++ b/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/420__model_serving.robot
@@ -51,7 +51,8 @@ Verify Openvino_IR Model Via UI
 Test Inference Without Token Authentication
     [Documentation]    Test the inference result after having deployed a model that doesn't require Token Authentication
-    [Tags]    Smoke
+    ...    Intermittently failing: RHOAIENG-3115
+    [Tags]    Smoke    FlakyTest
     ...    ODS-2053
     Run Keyword And Continue On Failure    Verify Model Inference    ${MODEL_NAME}    ${INFERENCE_INPUT_OPENVINO}
     ...    ${EXPECTED_INFERENCE_OUTPUT_OPENVINO}    token_auth=${FALSE}

diff --git a/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/LLMs/422__model_serving_llm_UI.robot b/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/LLMs/422__model_serving_llm_UI.robot
index 6f854bcf4..b9e661f00 100644
--- a/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/LLMs/422__model_serving_llm_UI.robot
+++ b/ods_ci/tests/Tests/400__ods_dashboard/420__model_serving/LLMs/422__model_serving_llm_UI.robot
@@ -29,7 +29,8 @@ ${BLOOM_MODEL_S3_DIR}=    bloom-560m/bloom-560m-caikit
 Verify User Can Serve And Query A Model Using The UI
     [Documentation]    Basic tests for preparing, deploying and querying a LLM model
     ...    using Kserve and Caikit runtime
-    [Tags]    Smoke    Tier1    ODS-2519    ODS-2522
+    ...    Intermittently failing: RHOAIENG-3148
+    [Tags]    Smoke    Tier1    ODS-2519    ODS-2522    FlakyTest
     [Setup]    Set Up Project    namespace=${TEST_NS}
     ${test_namespace}=    Set Variable    ${TEST_NS}
     ${flan_model_name}=    Set Variable    flan-t5-small-caikit

From 4b11d953da38da45a74d9b9ca87fc2ed21423761 Mon Sep 17 00:00:00 2001
From: Karel Suta
Date: Fri, 15 Mar 2024 11:02:33 +0100
Subject: [PATCH 2/8] [RHOAIENG-3757] Kueue: Reduce time of setting up of e2e
 tests

(cherry picked from commit 5e7f4b116e0c83cac3a91846cc31bddfdd934bd1)
---
 .../test-run-kueue-e2e-tests.robot | 45 ++++++++-----------
 1 file changed, 19 insertions(+), 26 deletions(-)

diff --git a/ods_ci/tests/Tests/650__distributed_workloads/test-run-kueue-e2e-tests.robot b/ods_ci/tests/Tests/650__distributed_workloads/test-run-kueue-e2e-tests.robot
index 251474f4d..0dd386040 100644
--- a/ods_ci/tests/Tests/650__distributed_workloads/test-run-kueue-e2e-tests.robot
+++ b/ods_ci/tests/Tests/650__distributed_workloads/test-run-kueue-e2e-tests.robot
@@ -9,13 +9,9 @@ Resource    ../../../tasks/Resources/RHODS_OLM/install/oc_install.robot

 *** Variables ***
-${KUEUE_DIR}                kueue
-${KUEUE_REPO_URL}           %{KUEUE_REPO_URL=https://github.com/opendatahub-io/kueue.git}
-${KUEUE_REPO_BRANCH}        %{KUEUE_REPO_BRANCH=dev}
-${JOB_GO_BIN}               %{WORKSPACE=.}/go-bin
-${KUBECONFIG}               %{WORKSPACE=.}/kconfig
-${WORKER_NODE}              ${EMPTY}
-
+${KUEUE_KUBECONFIG}         %{HOME}/.kube/config
+${WORKER_NODE}              ${EMPTY}
+${KUEUE_RELEASE_ASSETS}     %{KUEUE_RELEASE_ASSETS=https://github.com/opendatahub-io/kueue/releases/latest/download}

 *** Test Cases ***
 Run E2E test
@@ -36,11 +32,13 @@ Run Sanity test
 *** Keywords ***
 Prepare Kueue E2E Test Suite
     [Documentation]    Prepare Kueue E2E Test Suite
-    ${result} =    Run Process    git clone -b ${KUEUE_REPO_BRANCH} ${KUEUE_REPO_URL} ${KUEUE_DIR}
-    ...    shell=true    stderr=STDOUT
+    Log To Console    "Downloading compiled test binary e2e-singlecluster"
+    ${result} =    Run Process    curl --location --silent --output e2e-singlecluster ${KUEUE_RELEASE_ASSETS}/e2e-singlecluster && chmod +x e2e-singlecluster
+    ...    shell=true
+    ...    stderr=STDOUT
     Log To Console    ${result.stdout}
     IF    ${result.rc} != 0
-        FAIL    Unable to clone kueue repo ${KUEUE_REPO_URL}:${KUEUE_REPO_BRANCH}:${KUEUE_DIR}
+        FAIL    Unable to retrieve e2e-singlecluster compiled binary
     END

     Enable Component    kueue
@@ -53,19 +51,16 @@ Prepare Kueue E2E Test Suite
     ${return_code} =    Run And Return Rc    oc label ${WORKER_NODE} instance-type=on-demand
     Should Be Equal As Integers    ${return_code}    0    msg=Fail to label worker node with instance-type=on-demand

-    # Use Go install command to install ginkgo
-    Log To Console    Install ginkgo ...
-    ${result} =    Run Process    go install github.com/onsi/ginkgo/v2/ginkgo
-    ...    shell=true    stderr=STDOUT
-    ...    env:GOBIN=${JOB_GO_BIN}
-    ...    cwd=${KUEUE_DIR}
+Teardown Kueue E2E Test Suite
+    [Documentation]    Teardown Kueue E2E Test Suite
+    Log To Console    "Removing test binaries"
+    ${result} =    Run Process    rm -f e2e-singlecluster
+    ...    shell=true
+    ...    stderr=STDOUT
     Log To Console    ${result.stdout}
     IF    ${result.rc} != 0
-        FAIL    Fail to install ginkgo
+        FAIL    Unable to remove files
     END
-
-Teardown Kueue E2E Test Suite
-    [Documentation]    Teardown Kueue E2E Test Suite
     Disable Component    kueue

     # Remove label instance-type=on-demand from worker node
@@ -77,10 +72,9 @@ Run Kueue E2E Test
     [Documentation]    Run Kueue E2E Test
     [Arguments]    ${test_name}
     Log To Console    Running Kueue E2E test: ${test_name}
-    ${result} =    Run Process    ginkgo --focus-file\=${test_name} ${KUEUE_DIR}/test/e2e/singlecluster
+    ${result} =    Run Process    ./e2e-singlecluster -ginkgo.focus-file\=${test_name}
     ...    shell=true    stderr=STDOUT
-    ...    env:PATH=%{PATH}:${JOB_GO_BIN}
-    ...    env:KUBECONFIG=${KUBECONFIG}
+    ...    env:KUBECONFIG=${KUEUE_KUBECONFIG}
     ...    env:NAMESPACE=${APPLICATIONS_NAMESPACE}
     Log To Console    ${result.stdout}
     IF    ${result.rc} != 0
@@ -91,10 +85,9 @@ Run Kueue Sanity Test
     [Documentation]    Run Kueue Sanity Test
     [Arguments]    ${test_name}
     Log To Console    Running Kueue Sanity test: ${test_name}
-    ${result} =    Run Process    ginkgo --focus "${test_name}" ${KUEUE_DIR}/test/e2e/singlecluster
+    ${result} =    Run Process    ./e2e-singlecluster -ginkgo.focus "${test_name}"
     ...    shell=true    stderr=STDOUT
-    ...    env:PATH=%{PATH}:${JOB_GO_BIN}
-    ...    env:KUBECONFIG=${KUBECONFIG}
+    ...    env:KUBECONFIG=${KUEUE_KUBECONFIG}
     ...    env:NAMESPACE=${APPLICATIONS_NAMESPACE}
     Log To Console    ${result.stdout}
     IF    ${result.rc} != 0
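As a companion to the change above, here is a minimal sketch of the same flow outside Robot Framework: fetch the precompiled e2e-singlecluster binary from the release assets and run one focused test against the cluster. The release URL default and the KUBECONFIG/NAMESPACE variables come from the patch; the helper names, the focused file name and the namespace value are illustrative assumptions, not part of ods-ci.

# Illustrative sketch only: mirrors Prepare Kueue E2E Test Suite / Run Kueue E2E Test above.
import os
import stat
import subprocess
import urllib.request

RELEASE_ASSETS = os.environ.get(
    "KUEUE_RELEASE_ASSETS",
    "https://github.com/opendatahub-io/kueue/releases/latest/download",
)

def fetch_e2e_binary(dest="e2e-singlecluster"):
    # Download the precompiled ginkgo test binary instead of cloning the kueue repo.
    urllib.request.urlretrieve(f"{RELEASE_ASSETS}/e2e-singlecluster", dest)
    os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)
    return dest

def run_focused_test(binary, test_file, kubeconfig, namespace):
    # -ginkgo.focus-file limits the run to a single spec file, as in the Robot keyword.
    env = dict(os.environ, KUBECONFIG=kubeconfig, NAMESPACE=namespace)
    return subprocess.run([f"./{binary}", f"-ginkgo.focus-file={test_file}"], env=env, check=False)

if __name__ == "__main__":
    binary = fetch_e2e_binary()
    # "e2e_test.go" and "opendatahub" are placeholder values for illustration.
    result = run_focused_test(binary, "e2e_test.go", os.path.expanduser("~/.kube/config"), "opendatahub")
    print("exit code:", result.returncode)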
" @@ -88,11 +90,11 @@ fi echo "Updating DNS records (cluster api's) in AWS Route53" RESPONSE=$(aws route53 change-resource-record-sets --hosted-zone-id "$ZONE_ID" --change-batch \ -'{ "Comment": "Update A record for cluster API", "Changes": -[ { "Action": "UPSERT", "ResourceRecordSet": { "Name": "api.'"$CLUSTER_NAME"'.'"$BASE_DOMAIN"'", +'{ "Comment": "Update A record for cluster API", "Changes": +[ { "Action": "UPSERT", "ResourceRecordSet": { "Name": "api.'"$CLUSTER_NAME"'.'"$BASE_DOMAIN"'", "Type": "A", "TTL": 300, "ResourceRecords": [ { "Value": "'"$FIP_API"'" } ] } } ] }' --output json) || rc=$? if [[ -n "$rc" ]] ; then - echo -e "Failed to update DNS A record in AWS for cluster API. + echo -e "Failed to update DNS A record in AWS for cluster API. \n Releasing previously allocated floating IP in $OS_CLOUD ($FIP_API)" openstack floating ip delete "$FIP_API" exit ${rc:+$rc} @@ -103,12 +105,12 @@ aws route53 wait resource-record-sets-changed --id "$(echo "$RESPONSE" | jq -r ' echo "Updating DNS records (cluster ingress) in AWS Route53" RESPONSE=$(aws route53 change-resource-record-sets --hosted-zone-id "$ZONE_ID" --change-batch \ -'{ "Comment": "Update A record for cluster APPS", "Changes": -[ { "Action": "UPSERT", "ResourceRecordSet": { "Name": "*.apps.'"$CLUSTER_NAME"'.'"$BASE_DOMAIN"'", +'{ "Comment": "Update A record for cluster APPS", "Changes": +[ { "Action": "UPSERT", "ResourceRecordSet": { "Name": "*.apps.'"$CLUSTER_NAME"'.'"$BASE_DOMAIN"'", "Type": "A", "TTL": 300, "ResourceRecords": [ { "Value": "'"$FIP_APPS"'" } ] } } ] }' --output json) || rc=$? if [[ -n "$rc" ]] ; then - echo -e "Failed to update DNS A record in AWS for cluster APPS. + echo -e "Failed to update DNS A record in AWS for cluster APPS. \n Releasing previously allocated floating IP in $OS_CLOUD ($FIP_APPS)" openstack floating ip delete "$FIP_APPS" exit ${rc:+$rc} diff --git a/ods_ci/tasks/Resources/Provisioning/Hive/OSP/hive_osp_cluster_template.yaml b/ods_ci/tasks/Resources/Provisioning/Hive/OSP/hive_osp_cluster_template.yaml index 109cc393c..bedb1a998 100644 --- a/ods_ci/tasks/Resources/Provisioning/Hive/OSP/hive_osp_cluster_template.yaml +++ b/ods_ci/tasks/Resources/Provisioning/Hive/OSP/hive_osp_cluster_template.yaml @@ -21,7 +21,6 @@ items: name: ${infrastructure_configurations}[hive_cluster_name]-sec # see line 30 size: 1 maxSize: 1 - maxConcurrent: 1 runningCount: 1 skipMachinePools: true - apiVersion: v1 @@ -61,7 +60,7 @@ items: platform: openstack: cloud: ${infrastructure_configurations}[osp_cloud_name] - computeFlavor: m1.large + computeFlavor: g.standard.xxl externalDNS: null externalNetwork: ${infrastructure_configurations}[osp_network] pullSecret: '${infrastructure_configurations}[quay_pull_sec]' @@ -100,4 +99,4 @@ items: name: ${infrastructure_configurations}[image_set] namespace: ${infrastructure_configurations}[hive_claim_ns] spec: - releaseImage: quay.io/openshift-release-dev/ocp-release:${infrastructure_configurations}[ocp_version]-x86_64 \ No newline at end of file + releaseImage: quay.io/openshift-release-dev/ocp-release:${infrastructure_configurations}[ocp_version]-x86_64 diff --git a/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot b/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot index c6c58e016..e953fe251 100644 --- a/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot +++ b/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot @@ -103,6 +103,7 @@ Create Floating IPs ${shell_script} = Catenate ... 
${CURDIR}/OSP/create_fips.sh ${cluster_name} ${infrastructure_configurations}[base_domain] ... ${infrastructure_configurations}[osp_network] ${infrastructure_configurations}[osp_cloud_name] ${artifacts_dir}/ + ... ${infrastructure_configurations}[AWS_ACCESS_KEY_ID] ${infrastructure_configurations}[AWS_SECRET_ACCESS_KEY] ${return_code} = Run and Watch Command ${shell_script} output_should_contain=Exporting Floating IPs Should Be Equal As Integers ${return_code} 0 msg=Error creating floating IPs for cluster '${cluster_name}' ${fips_file_to_export} = Set Variable @@ -145,6 +146,7 @@ Watch Hive Install Log Wait For Cluster To Be Ready ${pool_namespace} = Get Cluster Pool Namespace ${pool_name} + Set Task Variable ${pool_namespace} Log Watching Hive Pool namespace: ${pool_namespace} console=True ${install_log_file} = Set Variable ${artifacts_dir}/${cluster_name}_install.log Create File ${install_log_file} @@ -157,7 +159,7 @@ Wait For Cluster To Be Ready ... oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.webConsoleURL' --exit-status ... shell=yes ${claim_status} = Run Process - ... oc -n ${hive_namespace} wait --for\=condition\=ClusterRunning\=True clusterclaim ${claim_name} --timeout\=10m shell=yes # robocop: disable:line-too-long + ... oc -n ${hive_namespace} wait --for\=condition\=ClusterRunning\=True clusterclaim ${claim_name} --timeout\=15m shell=yes # robocop: disable:line-too-long # Workaround for old Hive with Openstack - Cluster is displayed as Resuming even when it is Running # add also support to the new Hive where the Cluster is displayed as Running IF "${provider_type}" == "OSP" @@ -177,28 +179,29 @@ Wait For Cluster To Be Ready Save Cluster Credentials Set Task Variable ${cluster_details} ${artifacts_dir}/${cluster_name}_details.txt - Set Task Variable ${cluster_kubeconf} ${artifacts_dir}/kubeconfig - ${pool_namespace} = Get Cluster Pool Namespace ${pool_name} - ${result} = Run Process oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.apiURL' --exit-status shell=yes - Should Be True ${result.rc} == 0 Hive Cluster deployment '${pool_namespace}' does not have a valid API access + ${result} = Run Process + ... oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.webConsoleURL' --exit-status + ... shell=yes + Should Be True ${result.rc} == 0 + ... Hive Cluster deployment '${pool_namespace}' does not have a valid webConsoleURL access Create File ${cluster_details} console=${result.stdout}\n - ${ClusterDeployment} = Oc Get kind=ClusterDeployment name=${pool_namespace} - ... namespace=${pool_namespace} api_version=hive.openshift.io/v1 - ${apiURL} = Set Variable "${ClusterDeployment[0]['status']['apiURL']}" - Append to File ${cluster_details} api=${apiURL}\n + ${result} = Run Process + ... oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.apiURL' --exit-status + ... shell=yes + Append To File ${cluster_details} api=${result.stdout}\n ${result} = Run Process oc extract -n ${pool_namespace} --confirm secret/$(oc -n ${pool_namespace} get cd ${pool_namespace} -o jsonpath\='{.spec.clusterMetadata.adminPasswordSecretRef.name}') --to\=${artifacts_dir} ... 
shell=yes Should Be True ${result.rc} == 0 ${username} = Get File ${artifacts_dir}/username ${password} = Get File ${artifacts_dir}/password - Append to File ${cluster_details} username=${username}\n - Append to File ${cluster_details} password=${password}\n + Append To File ${cluster_details} username=${username}\n + Append To File ${cluster_details} password=${password}\n ${result} = Run Process oc extract -n ${pool_namespace} --confirm secret/$(oc -n ${pool_namespace} get cd ${pool_namespace} -o jsonpath\='{.spec.clusterMetadata.adminKubeconfigSecretRef.name}') --to\=${artifacts_dir} ... shell=yes Should Be True ${result.rc} == 0 - RETURN ${cluster_kubeconf} Login To Cluster + ${cluster_kubeconf} = Set Variable ${artifacts_dir}/kubeconfig Export Variables From File ${cluster_details} Create File ${cluster_kubeconf} # Test the extracted credentials @@ -209,18 +212,9 @@ Login To Cluster Log ${result.stdout}\n${result.stderr} console=True Should Be True ${result.rc} == 0 -Set Cluster Storage - Log Update Cluster ${cluster_name} Storage Class console=True - ${result} = Run Process oc --kubeconfig\=${cluster_kubeconf} patch StorageClass standard -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' - ... shell=yes - Log StorageClass standard:\n${result.stdout}\n${result.stderr} console=True - ${result} = Run Process oc --kubeconfig\=${cluster_kubeconf} patch StorageClass standard-csi -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' - ... shell=yes - Log StorageClass standard-csi:\n${result.stdout}\n${result.stderr} console=True - Run Keyword And Ignore Error Should Be True ${result.rc} == 0 - Get Cluster Pool Namespace [Arguments] ${hive_pool_name} + Log Cluster pool name is: ${hive_pool_name} console=True ${namespace} = Wait Until Keyword Succeeds 2 min 2 s ... 
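The DNS handling in create_fips.sh above keeps driving Route53 through the aws CLI. Purely as an illustration of the same operation, the sketch below performs the equivalent A-record UPSERT and wait from Python with boto3; the record layout (A record, TTL 300) mirrors the script, while the zone ID, record name and IP in the usage comment are placeholders.

# Sketch of the Route53 A-record UPSERT that create_fips.sh issues via the aws CLI.
import boto3

def upsert_a_record(zone_id, record_name, ip, comment):
    route53 = boto3.client("route53")
    response = route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Comment": comment,
            "Changes": [{
                "Action": "UPSERT",
                "ResourceRecordSet": {
                    "Name": record_name,
                    "Type": "A",
                    "TTL": 300,
                    "ResourceRecords": [{"Value": ip}],
                },
            }],
        },
    )
    # Equivalent of `aws route53 wait resource-record-sets-changed`.
    waiter = route53.get_waiter("resource_record_sets_changed")
    waiter.wait(Id=response["ChangeInfo"]["Id"])

# Example with placeholder values: API record for a cluster named rhods-qe-007.
# upsert_a_record("Z123EXAMPLE", "api.rhods-qe-007.rhods.ccitredhat.com", "10.0.0.10",
#                 "Update A record for cluster API")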
From 0884f969450015ae91c09651c4cb50b87b2dca88 Mon Sep 17 00:00:00 2001
From: Jiri Petrlik
Date: Thu, 21 Mar 2024 17:05:47 +0100
Subject: [PATCH 4/8] Fix kueue smoke test

(cherry picked from commit 94852466b134710ca41c92cc3d77944b11287497)
---
 ods_ci/tests/Tests/650__distributed_workloads/test-smoke.robot | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ods_ci/tests/Tests/650__distributed_workloads/test-smoke.robot b/ods_ci/tests/Tests/650__distributed_workloads/test-smoke.robot
index 64d0b04c6..c70f46801 100644
--- a/ods_ci/tests/Tests/650__distributed_workloads/test-smoke.robot
+++ b/ods_ci/tests/Tests/650__distributed_workloads/test-smoke.robot
@@ -62,7 +62,7 @@ Kueue smoke test
     [Teardown]    Disable Component    kueue
     Wait Component Ready    kueue
     Log To Console    Waiting for kueue-controller-manager to be available
-    ${result} =    Run Process    oc wait --for\=condition\=Available --timeout\=60s -n ${ODH_NAMESPACE} deployment/kueue-controller-manager
+    ${result} =    Run Process    oc wait --for\=condition\=Available --timeout\=300s -n ${ODH_NAMESPACE} deployment/kueue-controller-manager
     ...    shell=true    stderr=STDOUT
     Log To Console    ${result.stdout}
     IF    ${result.rc} != 0

From 5a9c83b8908d08aafdb033ebadf58372c870a790 Mon Sep 17 00:00:00 2001
From: Jan Stourac
Date: Sun, 24 Mar 2024 20:01:11 +0100
Subject: [PATCH 5/8] [Fix] request body logged content

The request headers were being logged twice instead of logging the
request body in the report.

(cherry picked from commit abfb8a5d6623302040f18b5202b9aed044e5f01c)
---
 .../Resources/Page/ODH/ODHDashboard/ODHDashboardAPI.resource | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ods_ci/tests/Resources/Page/ODH/ODHDashboard/ODHDashboardAPI.resource b/ods_ci/tests/Resources/Page/ODH/ODHDashboard/ODHDashboardAPI.resource
index e6a43835d..1ebcb38be 100644
--- a/ods_ci/tests/Resources/Page/ODH/ODHDashboard/ODHDashboardAPI.resource
+++ b/ods_ci/tests/Resources/Page/ODH/ODHDashboard/ODHDashboardAPI.resource
@@ -24,7 +24,7 @@ Perform Request
     Remove OAuth Token From Header    headers=${response.headers}    token=${request_args}[headers][Cookie]
     Set Log Level    INFO
     Set To Dictionary    ${LOG_DICT}    url=${response.request.url}    headers=${response.request.headers}
-    ...    body=${response.request.headers}    status_code=${response.status_code}
+    ...    body=${response.request.body}    status_code=${response.status_code}
     Log    ${request_type} Request: ${LOG_DICT}
     Set To Dictionary    ${LOG_RESP_DICT}    url=${response.url}    headers=${response.headers}    body=${response.text}
     ...    status_code=${response.status_code}    reason=${response.reason}

From a2e996beb51afcf8ed16867f563585dbb23996b0 Mon Sep 17 00:00:00 2001
From: Jan Stourac
Date: Sun, 24 Mar 2024 22:27:46 +0100
Subject: [PATCH 6/8] [Fix] cleanup for the dashboard api tests

The cleanup only checked for resources of two users, but the tests use
three different users, so let's clean up after all three.

(cherry picked from commit fc76761919c306b46ad60fecd0bcacd0e5a5e0b4)
---
 .../400__ods_dashboard/414__ods_dashboard_api.robot | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot b/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot
index c0a9df323..1ac15e0f9 100644
--- a/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot
+++ b/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot
@@ -835,11 +835,11 @@ Delete Dummy ConfigMaps
 Delete Test Notebooks CRs And PVCs From CLI
     [Documentation]    Stops all the notebook servers spanwed during a test by
     ...    deleting their CRs. At the end it closes any opened browsers
-    ${CR_1}=    Get User CR Notebook Name    ${TEST_USER_3.USERNAME}
-    ${CR_2}=    Get User CR Notebook Name    ${TEST_USER_4.USERNAME}
-    ${test_crs}=    Create List    ${CR_1}    ${CR_2}
-    FOR    ${nb_cr}    IN    @{test_crs}
-        ${present}=    Run Keyword And Return Status    OpenshiftLibrary.Oc Get    kind=Notebook    namespace=${NOTEBOOK_NS}    name=${nb_cr}
+    ${test_usernames}=    Create List    ${TEST_USER.USERNAME}    ${TEST_USER_3.USERNAME}    ${TEST_USER_4.USERNAME}
+    FOR    ${username}    IN    @{test_usernames}
+        ${nb_cr}=    Get User CR Notebook Name    ${username}
+        ${present}=    Run Keyword And Return Status
+        ...    OpenshiftLibrary.Oc Get    kind=Notebook    namespace=${NOTEBOOK_NS}    name=${nb_cr}
         IF    ${present} == ${FALSE}
             Continue For Loop
         ELSE
@@ -847,9 +847,10 @@ Delete Test Notebooks CRs And PVCs From CLI
         END
     END
     Close All Browsers
+    ${PVC_ADMIN_USER}=    Get User Notebook PVC Name    ${TEST_USER.USERNAME}
     ${PVC_BASIC_USER}=    Get User Notebook PVC Name    ${TEST_USER_3.USERNAME}
     ${PVC_BASIC_USER_2}=    Get User Notebook PVC Name    ${TEST_USER_4.USERNAME}
-    ${test_pvcs}=    Create List    ${PVC_BASIC_USER}    ${PVC_BASIC_USER_2}
+    ${test_pvcs}=    Create List    ${PVC_ADMIN_USER}    ${PVC_BASIC_USER}    ${PVC_BASIC_USER_2}
     Delete Test PVCs    pvc_names=${test_pvcs}

 Set Username In Secret Payload

From 6b568e1d9456dcd56d3419444dea2850d285e6f1 Mon Sep 17 00:00:00 2001
From: Jan Stourac
Date: Fri, 15 Mar 2024 19:28:08 +0100
Subject: [PATCH 7/8] [Fix] Notebook API test

This test has probably been failing since the addition of the
accelerator profiles. Since I could not find any documentation for the
dashboard API we use here, I grepped my local disk for some keywords
and found this file [1], which helped me understand what is wrong with
this test. As such, please review thoroughly, since I could have missed
something.

[1] https://github.com/opendatahub-io/odh-dashboard/blob/main/frontend/src/types.ts#L670

(cherry picked from commit 439e2b31a9612c356fe2b4670d4aca3b2504dd2a)
---
 .../Tests/400__ods_dashboard/414__ods_dashboard_api.robot | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot b/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot
index 1ac15e0f9..ed5b930c5 100644
--- a/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot
+++ b/ods_ci/tests/Tests/400__ods_dashboard/414__ods_dashboard_api.robot
@@ -1,6 +1,8 @@
 *** Settings ***
 Documentation       Suite for a basic security test of Dashboard APIs. The tests verifies that user
-...                 reach endpoints based on their user permissions
+...                 reach endpoints based on their user permissions.
+...                 Refer to this file https://github.com/opendatahub-io/odh-dashboard/blob/main/frontend/src/types.ts
+...                 to read particular API definitions.
 Library             OpenShiftLibrary
 Library             SeleniumLibrary
 Resource            ../../Resources/Common.robot
@@ -72,8 +74,8 @@ ${VALIDATE_ISV_RESULT_ENDPOINT}=    api/validate-isv/results?appName=anacon
 ${NB_ENDPOINT_PT0}=         api/notebooks
 ${NB_ENDPOINT_PT1}=         ${NB_ENDPOINT_PT0}/${NOTEBOOK_NS}/
 ${NB_ENDPOINT_PT2}=         /status
-${NB_ENDPOINT_BODY_A}=      {"notebookSizeName":"Small","imageName":"s2i-minimal-notebook","imageTagName":"","url":"${ODH_DASHBOARD_URL}","gpus":0,"envVars":{"configMap":{},"secrets":{"super-secre":"my new secret 20!"}},"state":"started"}
-${NB_ENDPOINT_BODY_B}=      {"notebookSizeName":"Small","imageName":"s2i-minimal-notebook","imageTagName":"","url":"${ODH_DASHBOARD_URL}","gpus":0,"envVars":{"configMap":{},"secrets":{"super-secre":"my new secret 20!"}},"state":"started","username":""}
+${NB_ENDPOINT_BODY_A}=      {"notebookSizeName":"Small","imageName":"s2i-minimal-notebook","imageTagName":"","acceleratorProfile": {"count": 0},"envVars":{"configMap":{},"secrets":{"super-secret":"my new secret 20!"}},"state":"started"}    #robocop: disable:line-too-long
+${NB_ENDPOINT_BODY_B}=      {"notebookSizeName":"Small","imageName":"s2i-minimal-notebook","imageTagName":"","acceleratorProfile": {"count": 0},"envVars":{"configMap":{},"secrets":{"super-secret":"my new secret 20!"}},"state":"started","username":""}    #robocop: disable:line-too-long
 ${NB_STOP_ENDPOINT_BODY_A}=     {"state":"stopped"}
 ${NB_STOP_ENDPOINT_BODY_B}=     {"state":"stopped","username": ""}
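To show how the reworked payload is meant to be used, here is a hedged sketch that posts the acceleratorProfile-style body to the dashboard's api/notebooks endpoint. The endpoint path and body shape are taken from the test variables above; the dashboard host, the cookie value and the TLS handling are assumptions for illustration only.

# Illustrative request against the dashboard notebooks API using the new body shape.
import requests

DASHBOARD_URL = "https://odh-dashboard.example.com"  # placeholder host
OAUTH_COOKIE = "<redacted>"                          # placeholder oauth proxy cookie value

body = {
    "notebookSizeName": "Small",
    "imageName": "s2i-minimal-notebook",
    "imageTagName": "",
    "acceleratorProfile": {"count": 0},  # replaces the old "gpus": 0 field
    "envVars": {"configMap": {}, "secrets": {"super-secret": "my new secret 20!"}},
    "state": "started",
}

response = requests.post(
    f"{DASHBOARD_URL}/api/notebooks",
    json=body,
    headers={"Cookie": f"_oauth_proxy={OAUTH_COOKIE}"},  # cookie name assumed for the sketch
    verify=False,  # self-signed certificates are common on test clusters
)
print(response.status_code, response.text)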
From 1ef8bb5d71c551a66eec6449e6faf60f5caa5d74 Mon Sep 17 00:00:00 2001
From: tarukumar <93319437+tarukumar@users.noreply.github.com>
Date: Wed, 27 Mar 2024 13:43:48 +0530
Subject: [PATCH 8/8] Use cluster id while adding machinepool (#1324)

(cherry picked from commit 5db9e26c59c9923a14e4db77e3f3702859037f79)
---
 ods_ci/utils/scripts/ocm/ocm.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ods_ci/utils/scripts/ocm/ocm.py b/ods_ci/utils/scripts/ocm/ocm.py
index 9054eba54..7f320c8d2 100644
--- a/ods_ci/utils/scripts/ocm/ocm.py
+++ b/ods_ci/utils/scripts/ocm/ocm.py
@@ -378,9 +378,10 @@ def add_machine_pool(self):
         if bool(self.reuse_machine_pool) and self.check_if_machine_pool_exists():
             log.info(f"MachinePool with name {self.pool_name} exists in cluster {self.cluster_name}. Hence reusing it")
         else:
+            cluster_id = self.get_osd_cluster_id()
             cmd = "ocm --v={} create machinepool --cluster {} --instance-type {} --replicas {} --taints {} {}".format(
                 self.ocm_verbose_level,
-                self.cluster_name,
+                cluster_id,
                 self.pool_instance_type,
                 self.pool_node_count,
                 self.taints,
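For context on the change above, a minimal sketch of the corrected flow: the machine pool is created against the cluster ID returned by a lookup such as get_osd_cluster_id(), not against the display name. Only the command shape comes from the diff; the helper signature and the values in the usage comment are assumptions.

# Sketch of the corrected flow: create the machine pool against the cluster ID, not the name.
import subprocess

def add_machine_pool(ocm_verbose_level, cluster_id, instance_type, replicas, taints, pool_name):
    # cluster_id is expected to come from a lookup such as get_osd_cluster_id() in ocm.py.
    cmd = (
        "ocm --v={} create machinepool --cluster {} "
        "--instance-type {} --replicas {} --taints {} {}"
    ).format(ocm_verbose_level, cluster_id, instance_type, replicas, taints, pool_name)
    return subprocess.run(cmd, shell=True, check=False)

# Example with placeholder values:
# add_machine_pool(0, "1a2b3c4d5e", "m5.2xlarge", 3, "key=value:NoSchedule", "gpu-pool")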