diff --git a/.github/README.workflows.adoc b/.github/README.workflows.adoc new file mode 100644 index 00000000..29af217b --- /dev/null +++ b/.github/README.workflows.adoc @@ -0,0 +1,7 @@ +NOTE: If you rename any of the tests in this folder, or want to add new required ones, please remember to submit a PR against the link:https://github.com/openshift/release[openshift/releases] repo with the updated list of `required_status_checks` for pull requests. + +For example: + +* link:https://github.com/openshift/release/tree/master/core-services/prow/02_config/janus-idp/[core-services/prow/02_config/janus-idp/] +* link:https://github.com/openshift/release/tree/master/core-services/prow/02_config/redhat-developer[core-services/prow/02_config/redhat-developer] + diff --git a/.github/renovate.json b/.github/renovate.json index 4fad9176..39f08582 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -4,7 +4,9 @@ ":gitSignOff", ":rebaseStalePrs", "group:allNonMajor", - "docker:disableMajor" + "docker:disableMajor", + "default:pinDigestsDisabled", + "helpers:pinGitHubActionDigests" ], "labels": [ "kind/dependency upgrade" @@ -14,55 +16,191 @@ "/^1\\..*\\.x/" ], "constraints": { - "go": "1.20" + "go": "1.21" }, "packageRules": [ { - "matchCategories": [ + "description": "Do NOT generate PRs to pin or apply digests to dockerfiles", + "enabled": false, + "matchDatasources": [ "docker" ], "matchUpdateTypes": [ - "patch" + "pin","pinDigest", "digest" ], - "digest": { - "enabled": false - }, - "automerge": true, - "pinDigests": false + "automerge": false }, { + "description": "Do automerge patch updates to dockerfiles", + "enabled": true, "matchDatasources": [ "docker" ], "matchUpdateTypes": [ "patch" ], - "digest": { - "enabled": false - }, "automerge": true, "pinDigests": false }, { + "description": "k8s go: minor and patch updates in main", + "enabled": true, + "groupName": "k8s-go main", + "matchDatasources": [ + "go" + ], + "matchPackagePrefixes": [ + "k8s.io/api", + 
"k8s.io/apimachinery", + "k8s.io/client-go" + ], + "matchUpdateTypes": [ + "minor", "patch" + ], + "baseBranches": [ + "main" + ], + "automerge": false + }, + { + "description": "k8s go: patch updates only in 1.y.x", + "enabled": true, + "groupName": "k8s-go 1.y.x", + "matchDatasources": [ + "go" + ], + "matchPackagePrefixes": [ + "k8s.io/api", + "k8s.io/apimachinery", + "k8s.io/client-go" + ], + "matchUpdateTypes": [ + "patch" + ], + "baseBranches": [ + "/^1\\..*\\.x/" + ], + "automerge": false + }, + { + "description": "ginkgo: minor and patch updates in main", + "enabled": true, + "groupName": "ginkgo main", + "matchDatasources": [ + "go" + ], + "matchPackagePrefixes": [ + "github.com/onsi/ginkgo/v2" + ], + "matchUpdateTypes": [ + "minor", "patch" + ], + "baseBranches": [ + "main" + ], + "automerge": false + }, + { + "description": "ginkgo: patch updates only in 1.y.x", + "enabled": true, + "groupName": "ginkgo 1.y.x", + "matchDatasources": [ + "go" + ], + "matchPackagePrefixes": [ + "github.com/onsi/ginkgo/v2" + ], + "matchUpdateTypes": [ + "patch" + ], + "baseBranches": [ + "/^1\\..*\\.x/" + ], + "automerge": false + }, + { + "description": "gomega: minor and patch updates in main", + "enabled": true, + "groupName": "gomega main", + "matchDatasources": [ + "go" + ], + "matchPackagePrefixes": [ + "github.com/onsi/gomega" + ], + "matchUpdateTypes": [ + "minor", "patch" + ], + "baseBranches": [ + "main" + ], + "automerge": false + }, + { + "description": "gomega: patch updates only in 1.y.x", + "enabled": true, + "groupName": "gomega 1.y.x", + "matchDatasources": [ + "go" + ], + "matchPackagePrefixes": [ + "github.com/onsi/gomega" + ], + "matchUpdateTypes": [ + "patch" + ], + "baseBranches": [ + "/^1\\..*\\.x/" + ], + "automerge": false + }, + { + "description": "Do NOT generate PRs for major go dependency updates ", + "enabled": false, + "matchDatasources": [ + "go" + ], + "matchUpdateTypes": [ + "major" + ], + "automerge": false + }, + { + "description": "Do 
automerge go dependency patch updates, except for versions starting with 0", + "enabled": true, "matchDatasources": [ "go" ], "matchUpdateTypes": [ - "minor,", "patch" ], "matchCurrentVersion": "!/^0/", - "automerge": true, - "pinDigests": true + "automerge": true }, { + "description": "Do generate PRs for golang version patch bumps, keeping x.yy version the same", + "enabled": true, "matchDatasources": [ "golang-version" ], - "rangeStrategy": "bump", - "enabled": false, + "matchUpdateTypes": [ + "patch" + ], "automerge": false + }, + { + "description": "Do automerge and pin actions in GH workflows, except for versions starting with 0", + "enabled": true, + "matchDatasources": [ + "github-runners" + ], + "matchUpdateTypes": [ + "minor","patch" + ], + "matchCurrentVersion": "!/^0/", + "automerge": true } + ], "vulnerabilityAlerts": { "enabled": true, diff --git a/.github/workflows/next-container-build.yaml b/.github/workflows/next-container-build.yaml index abf3d61e..0df120e5 100644 --- a/.github/workflows/next-container-build.yaml +++ b/.github/workflows/next-container-build.yaml @@ -38,7 +38,7 @@ jobs: packages: write steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 with: fetch-depth: 0 @@ -72,7 +72,7 @@ jobs: - name: Setup Go # run this stage only if there are changes that match the includes and not the excludes if: ${{ env.CHANGES != '' }} - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: go-version-file: 'go.mod' diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml index 637e1215..8d324459 100644 --- a/.github/workflows/nightly.yaml +++ b/.github/workflows/nightly.yaml @@ -12,15 +12,19 @@ jobs: strategy: fail-fast: false matrix: - branch: [ main, 1.1.x ] - name: E2E Tests - ${{ matrix.branch }} + branch: [ main, 1.2.x, 1.1.x ] 
+ test_upgrade: [ 'true', 'false' ] + exclude: + - branch: 1.1.x # Testing upgrade from 1.1.x + test_upgrade: 'true' + name: 'E2E Tests - ${{ matrix.branch }} - upgrade=${{ matrix.test_upgrade }}' concurrency: - group: ${{ github.workflow }}-${{ matrix.branch }} + group: '${{ github.workflow }}-${{ matrix.branch }}-${{ matrix.test_upgrade }}' cancel-in-progress: true env: CONTAINER_ENGINE: podman steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 # default branch will be checked out by default on scheduled workflows + - uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 # default branch will be checked out by default on scheduled workflows with: fetch-depth: 0 @@ -29,25 +33,37 @@ jobs: run: git switch ${{ matrix.branch }} - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: go-version-file: 'go.mod' - name: Determine built operator image run: | - latestNext="next" - # for main branch, use next tags; for 1.x branches, use :latest tags - if [[ $(git rev-parse --abbrev-ref HEAD) != "main" ]]; then - latestNext="latest" - fi - echo "OPERATOR_IMAGE=quay.io/janus-idp/operator:${latestNext}" >> $GITHUB_ENV + echo "OPERATOR_IMAGE=$(make show-img)" >> $GITHUB_ENV + + - name: Check if image exists in remote registry + id: operator-image-existence-checker + run: | + echo "OPERATOR_IMAGE_EXISTS=$(if skopeo inspect "docker://${{ env.OPERATOR_IMAGE }}" > /dev/null; then echo "true"; else echo "false"; fi)" >> $GITHUB_OUTPUT + + - name: Display warning if image was not found + if: ${{ steps.operator-image-existence-checker.outputs.OPERATOR_IMAGE_EXISTS == 'false' }} + run: | + echo "::warning ::Image ${{ env.OPERATOR_IMAGE }} not found for testing the ${{ matrix.branch }} branch. It might have expired. E2E tests will be skipped for ${{ matrix.branch }}." 
- name: Start Minikube - uses: medyagh/setup-minikube@317d92317e473a10540357f1f4b2878b80ee7b95 # v0.0.16 - with: - addons: ingress + if: ${{ steps.operator-image-existence-checker.outputs.OPERATOR_IMAGE_EXISTS == 'true' }} + uses: medyagh/setup-minikube@d8c0eb871f6f455542491d86a574477bd3894533 # v0.0.18 + + - name: Run E2E tests (Operator Upgrade path) + if: ${{ matrix.test_upgrade == 'true' && steps.operator-image-existence-checker.outputs.OPERATOR_IMAGE_EXISTS == 'true' }} + env: + BACKSTAGE_OPERATOR_TESTS_PLATFORM: minikube + IMG: ${{ env.OPERATOR_IMAGE }} + run: make test-e2e-upgrade - name: Run E2E tests + if: ${{ matrix.test_upgrade == 'false' && steps.operator-image-existence-checker.outputs.OPERATOR_IMAGE_EXISTS == 'true' }} env: BACKSTAGE_OPERATOR_TESTS_PLATFORM: minikube IMG: ${{ env.OPERATOR_IMAGE }} diff --git a/.github/workflows/pr-bundle-diff-checks.yaml b/.github/workflows/pr-bundle-diff-checks.yaml index f1098cf1..4b456a9a 100644 --- a/.github/workflows/pr-bundle-diff-checks.yaml +++ b/.github/workflows/pr-bundle-diff-checks.yaml @@ -34,7 +34,7 @@ jobs: # see list of approvers in OWNERS file environment: ${{ (github.event.pull_request.head.repo.full_name == github.repository || - contains(fromJSON('["gazarenkov","jianrongzhang89","kadel","nickboldt","rm3l","kim-tsao","openshift-cherrypick-robot"]'), github.actor)) && 'internal' || 'external' }} + contains(fromJSON('["coreydaley","gazarenkov","kadel","nickboldt","rm3l","kim-tsao","openshift-cherrypick-robot"]'), github.actor)) && 'internal' || 'external' }} runs-on: ubuntu-latest steps: - name: approved @@ -49,14 +49,14 @@ jobs: pull-requests: write steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 with: fetch-depth: 0 repository: ${{github.event.pull_request.head.repo.full_name}} ref: ${{ github.event.pull_request.head.ref }} - name: Setup Go - uses: 
actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: go-version-file: 'go.mod' @@ -97,3 +97,10 @@ jobs: repo: context.repo.repo, body: '⚠️ Files changed in bundle generation!

Those changes to the operator bundle manifests should have been pushed automatically to your PR branch.
You might also need to manually update the [`.rhdh/bundle/manifests/rhdh-operator.csv.yaml`](${{ env.GH_BLOB_VIEWER_BASE_URL }}/.rhdh/bundle/manifests/rhdh-operator.csv.yaml) CSV file accordingly.' }) + + - name: Check if the CSV for RHDH needs to be updated + run: | + echo "Checking that the RBAC roles of the downstream RHDH operator are not out of sync with the upstream CSV..." + diff -U 1 \ + <(yq '.spec.install.spec.clusterPermissions' bundle/manifests/backstage-operator.clusterserviceversion.yaml | grep -v 'serviceAccountName: ') \ + <(yq '.spec.install.spec.clusterPermissions' .rhdh/bundle/manifests/rhdh-operator.csv.yaml | grep -v 'serviceAccountName: ') diff --git a/.github/workflows/pr-container-build.yaml b/.github/workflows/pr-container-build.yaml index 783cb19f..77daec4c 100644 --- a/.github/workflows/pr-container-build.yaml +++ b/.github/workflows/pr-container-build.yaml @@ -41,7 +41,7 @@ jobs: # see list of approvers in OWNERS file environment: ${{ (github.event.pull_request.head.repo.full_name == github.repository || - contains(fromJSON('["gazarenkov","jianrongzhang89","kadel","nickboldt","rm3l","kim-tsao","openshift-cherrypick-robot"]'), github.actor)) && 'internal' || 'external' }} + contains(fromJSON('["coreydaley","gazarenkov","kadel","nickboldt","rm3l","kim-tsao","openshift-cherrypick-robot"]'), github.actor)) && 'internal' || 'external' }} runs-on: ubuntu-latest steps: - name: approved @@ -59,7 +59,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.ref }} @@ -86,7 +86,7 @@ jobs: - name: Setup Go # run this stage only if there are changes that match the includes and not the excludes if: ${{ env.CHANGES != '' }} - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: 
go-version-file: 'go.mod' diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index fe2eb8a0..ebcac889 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 with: fetch-depth: 0 @@ -50,7 +50,7 @@ jobs: } >> "$GITHUB_ENV" - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: go-version-file: 'go.mod' diff --git a/.rhdh/bundle/manifests/rhdh-operator.csv.yaml b/.rhdh/bundle/manifests/rhdh-operator.csv.yaml index 6f5bd916..fe88fde5 100644 --- a/.rhdh/bundle/manifests/rhdh-operator.csv.yaml +++ b/.rhdh/bundle/manifests/rhdh-operator.csv.yaml @@ -7,7 +7,7 @@ metadata: alm-examples: |- [ { - "apiVersion": "rhdh.redhat.com/v1alpha1", + "apiVersion": "rhdh.redhat.com/v1alpha2", "kind": "Backstage", "metadata": { "name": "developer-hub" @@ -19,11 +19,11 @@ metadata: capabilities: Seamless Upgrades categories: Developer Tools certified: 'true' - containerImage: registry-proxy.engineering.redhat.com/rh-osbs/rhdh-rhdh-rhel9-operator:1.2 + containerImage: registry-proxy.engineering.redhat.com/rh-osbs/rhdh-rhdh-rhel9-operator:1.3 createdAt: "2023-03-20T16:11:34Z" description: Red Hat Developer Hub is a Red Hat supported version of Backstage. - It comes with pre-built plug-ins, configuration settings, and deployment mechanisms, - which can help streamline the process of setting up a self-managed internal + It comes with pre-built plug-ins and configuration settings, supports use of an external database, and can + help streamline the process of setting up a self-managed internal developer portal for adopters who are just starting out. 
operatorframework.io/suggested-namespace: rhdh-operator operators.openshift.io/valid-subscription: '["Red Hat Developer Hub"]' @@ -42,8 +42,8 @@ metadata: features.operators.openshift.io/token-auth-gcp: "false" repository: https://gitlab.cee.redhat.com/rhidp/rhdh/ support: Red Hat - skipRange: '>=1.0.0 <1.2.0' - name: rhdh-operator.v1.2.0 + skipRange: '>=1.0.0 <1.3.0' + name: rhdh-operator.v1.3.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -57,18 +57,31 @@ spec: kind: Backstage name: backstages.rhdh.redhat.com version: v1alpha1 + - description: Backstage is the Schema for the Red Hat Developer Hub backstages API. + It comes with pre-built plug-ins, configuration settings, and deployment mechanisms, + which can help streamline the process of setting up a self-managed internal + developer portal for adopters who are just starting out. + displayName: Red Hat Developer Hub + kind: Backstage + name: backstages.rhdh.redhat.com + version: v1alpha2 description: | - Red Hat Developer Hub is an enterprise-grade platform for building developer portals, containing a supported and opinionated framework. By implementing a unified and open platform designed to maximize developer skills, ease onboarding, and increase development productivity, focus can be centered on what really matters: writing great code. Red Hat Developer Hub also offers Software Templates to simplify the development process, which can reduce friction and frustration for development teams, boosting their productivity and increasing an organization's competitive advantage. + Red Hat Developer Hub is an enterprise-grade platform for building developer portals, containing a supported and opinionated framework. It comes with pre-built plug-ins and configuration settings, supports use of an external database, and can help streamline the process of setting up a self-managed internal developer portal for adopters who are just starting out. 
By implementing a unified and open platform designed to maximize developer skills, ease onboarding, and increase development productivity, focus can be centered on what really matters: writing great code. Red Hat Developer Hub also offers Software Templates to simplify the development process, which can reduce friction and frustration for development teams, boosting their productivity and increasing an organization's competitive advantage. System Architects can benefit by implementing a tailored platform with a complementary suite of verified and curated tools and components needed for operations teams to support developers—within a centralized, consistent location. Development teams can experience increased productivity, fewer development team obstacles, and simplified governance of technology choices with self-service and guardrails. + ## Telemetry data collection + + The telemetry data collection feature is enabled by default. Red Hat Developer Hub sends telemetry data to Red Hat by using the `backstage-plugin-analytics-provider-segment` plugin. To disable this and to learn what data is being collected, see [Red Hat Developer Hub documentation on telemetry data collection](https://access.redhat.com/documentation/en-us/red_hat_developer_hub/1.3/html-single/administration_guide_for_red_hat_developer_hub/index#assembly-rhdh-telemetry_admin-rhdh). 
+ ## More Information * [Red Hat Developer Hub Product Page](https://www.redhat.com/en/technologies/cloud-computing/developer-hub) * [Product Documentation](https://access.redhat.com/documentation/en-us/red_hat_developer_hub) * [Life Cycle](https://access.redhat.com/node/7025299) * [Support Policies](https://access.redhat.com/policy/developerhub-support-policy) - + * [Configuring external PostgreSQL databases](https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.3/html/administration_guide_for_red_hat_developer_hub/assembly-configuring-external-postgresql-databases#assembly-configuring-external-postgresql-databases) + displayName: Red Hat Developer Hub Operator icon: - base64data: iVBORw0KGgoAAAANSUhEUgAAAMAAAADACAYAAABS3GwHAAAACXBIWXMAAA7DAAAOwwHHb6hkAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAIABJREFUeJztnXlcU1f6/z9JSNhCKBB2EBRQRBjcmFo7KuKAdcFlXNra2hmnrThTl+52bL+d1nFqtdPalqmjtbYddaZWrVVBrVD3X7W2iBtYERCVVXYS1pDk/v644oDec3KT3EDA+369fLXk3HvOQzife8/ynOeRMAwDeyQ6OrofgAQHB4cH5XJ5jFwuD1EoFA9IpVIHmUzm4ODgIJPJZBKpVNrTpt7XGI1GGAwGRq/XGwwGg95oNOp1Ol19e3v7jfb29kt6vf4MgCM5OTk3e9pWLiT2IoDo6Gg/qVT6lJOT00ylUvkrV1dXF4lE0tNmiQhES0tLu1arLWhtbf2uqanpk/z8/MKetgnoYQEMGjRogIuLyyqVSjVVpVK5iR3+/oBhGDQ0NGgaGhr2t7a2vpGXl3etp2zpdgGMHDlS3tbW9lc3N7c/eHp6BopDmPsbo9GI2traUo1G84WTk9PKrKys9u5sv9sEEBER4SiXy9/19fVd5Obm5tQtjYr0KpqamnSVlZU7b926taiysrKxO9q0uQD8/PxcfX19P/P29p7t4uLiYNPGRPoEzc3N+qqqqp3V1dXPlJaWNtuyLZsKYMiQIUt8fX3fUyqVjkLUp9PpoNPpYDAY7vzXaDQKUbWIhUilUshkMigUijv/VSgUgtTd2NjYVlFR8ZfLly+vE6RCDmwigPDw8Ghvb+893t7eYZbczzAMNBoNampqoNVq0djYiMbGRuh0OqFNFbEBCoUCSqUSSqUSbm5uUKvVcHNzg6WLHBUVFcVVVVXTioqKzgtsqvACiI6O/jA4OHipXC4367dtb29HeXk5KisrUVNTI3b2PoZCoYCXlxd8fHzg7+8PuVxu1v06nY4pKSn5MCcn50Uh7RJMABERESoPD4/jfn5+Q/newzAMKisrUVJSgoqKCnE4c58gk8ng5+eHoKAgeHt7m/VmKCsr+6WqqurhGzdu1AlhiyACiIiISAoMDNyjUqmc+VxvNBpRXFyMgoICNDfbdI4jYue4uroiPDwcQUFB4LskrtFomsvLy6fl5eUdtrZ9qwUQGRm5KDQ0dL1CoTApY6PRiBs3bqCgoACtra1WtSvSt3B2dkZ4eDhCQkJ4vRF0Oh1TVFS0JC8v7xNr2rVKAFFRUf8XGhq60sHB9OpmbW0tLl68CK1Wa3F7In
0fpVKJmJgYqNVqk9caDAZcv359dW5u7gpL27NYALGxsf8KDg5eZOq1pdPpkJubi5KSEovaEbk/CQ4ORlRUlMkl1dujis2XLl16xpJ2LBJATEzMZ6GhoU+belXV1tbi7Nmz4nBHxCKcnZ0xfPhweHp6Uq9jGAZFRUVf5uTkLDC3DbMFEBUV9X8DBgxYaerJX1RUhMuXL4srOyJWIZFIMHDgQERERFDnBkajEdeuXfv75cuX3zCrfnMEEBkZuWjAgAH/oo35jUYjsrOzUV5ebo4dIiJUAgICMGzYMOpKkV6vR2Fh4WJzJsa8BRAREZEUFhb2HW21R6/X46effkJNTQ3f9kVEeKNWqxEXFwfaA/j26lBSXl7e93zq5CWAiIgIVWBgYLlKpXIhXdPW1oYzZ86goaGBT7siIhbh7u6OBx98EI6OZPeyhoaGlpKSEv/CwkKTnZHXzoO7u/tpWufX6/Vi5xfpFhoaGvDjjz+ivZ18bMDd3d3Z09PzJJ/6TAogOjp6bUBAQBSp3Gg0IisrS+z8It2GRqPBzz//TF1g8ff3j4mOjv7QVF1UAQwYMCA2ODj4ZVI5wzDIzs5GVVWVqXZERASlpqYG586do14TFBS0NDw8PJp2DVUAPj4+e2henQUFBeJqj0iPUVZWhsJC8tl6hUIh8fLy2kOrgyiAqKio5318fEJJ5XV1dbh69SofO0VEbMYvv/yC2tpaYrmPj0/YoEGDXiCVcwogMDDQxc/P713STTqdDllZWeIml0iP0zEMJ02KJRIJAgMDVwcGBnIu4nAKQK1Wf047xpibmyu6N4jYDS0tLcjNzSWWK5VKR29v70+5yu4RQP/+/Z28vb1nkSqrra0VHdtE7I7i4mLqBqxarX6U6y1wjwCUSmUqKXoDwzC4dOmSVYaKiNiKS5cuEYflLi4uDl5eXh/c/XkXAYwcOVLu4+PzFKmB69evQ6PRWG2oiIgt0Gq1uHmTHILU29t7gUQi6fJw7yKAtra2v7q6unI6YBuNRhQUFAhiqIiIrSgoKCC+BVxdXRVDhgx5s/NnXQTg5uZG9KcuLi4WJ74idk9LSwt1jnp3H78jgMjIyP6enp4BXDcxDEPdcBARsScKCgpAcvL09PQMioyM7N/x8x0BODk5vUPyta6srERTU5PQdoqI2ISmpiaie45MJoOTk9PKjp/v9Hh3d/cppArFZU+R3gatz7q7uyd3/L8UAAYPHhyqUqncuC5ub2/HrVu3BDdQRMSWlJeXE3eHVSqVe3h4eDBwWwAKhWIh6bxleXk5DAaDrewUEbEJRqMRFRUVnGUSiQQuLi7PArcF4OjoOIlUUWVlpU0MFBGxNbS+6+TkNBm4LQClUhnJdRHDMOL5XpFeS3V1NXE1yNXVNQoApNHR0f2USiVnxhaNRiNGaRbpteh0OmIkQqVS6RwZGRngwDDMBFIFveXp7+3tjcTERISFhcHX1xceHh64desWSkpKkJOTg2PHjtmNkNVqNSZPnoyIiAgEBQUBYFcs8vPzsX//frv5zhUKBcaPH48hQ4YgKCgIvr6+qKurw61bt1BYWIjMzMxecRKwuroaKpXqns8lEgnkcvkEB7lcPop0sz37/UilUsybNw8LFy7E6NGjIZPJiNc2NDTgwIEDWLNmDS5cuNCNVv6P4cOHY9WqVUhKSiLaajAYcOjQIbzxxhsmj/vZitjYWCxfvhxTpkzh7DgdGAwGnDp1Chs3bsRXX31lt2dDaLFopVLpKKlcLh9CuqCxsVvylJlNfHw8zp49i61bt2LMmDHUzg+woTQef/xxZGdn49///jf8/Py6yVJALpdj/fr1yMrKwqRJk6i2ymQyTJ48GVlZWUhNTTU7iYQ1+Pv7Y8uWLcjOzsbjjz9O7fwAa+uYMWOwbds2ZGVlYezYsd1kqXnQ+rBCoYiWKhSKUNIF9rj7u3DhQmRmZmLoUN55OO4glUrx1FNP4ezZs4iLi7OBdV1RqVTIzMzEn/70J7OSQEilUi
xevBgZGRlwc+PcnhGUoUOH4scff8T8+fN5x+jvzLBhw3DkyBEsX77cBtZZhwkB9JfK5XJOqXckpLMnUlNTsXHjRmpkMD4EBATg2LFjmDhxokCW3YtUKsW2bdswbtw4i+uIj4/Hjh07TL7hrGHixIn44Ycf0K9fP6vqkclkePfdd5GamiqQZcKg0+mIG2IODg4PSGUyGaf7s711/hdffBGLFy8WrD4XFxfs3LkT0dHUqBkW89prryE5Odn0hSZ45JFH8PLLxMg0VhEZGYnt27fDxYUY88xsFi9ejOeff16w+oSA1JdlMplckpiY2O7k5HTPI7W+vh4nT/IKrmVz4uPj8f3339vkSVhQUIDY2FhBUzV5e3ujoKDA5DiaL1qtFhEREYK6pLi4uODixYsIC7MokScVg8GAhIQEnDhxQvC6LWHs2LFwd3e/5/OWlha9VEboVXq93uaG8UEqlWLdunXUzq/T6bBhwwYkJCTA19cXjo6OCAkJwfz583H8+HFq/eHh4Vi2bJmgNi9dupTa+Y8ePYrExMQ7aUSTkpJw7Ngx4vVubm7485//LKiNL7zwgsnOf/ToUcyfPx8hISFwdHSEr68vJkyYgI0bN1JHCDKZDOvWrbNoPmELSH1ZJpPJMGXKFCY5Ofmef3FxcQyAHv/35JNPMjTOnTvHhIWFUeuYO3cuo9VqiXXU19czXl5egtmck5NDbOvTTz9lpFLpPfdIpVJm06ZNxPsuXrwomH1qtZppaGggtqXVapnZs2dT6wgPD2fOnz9P/dvMmzevx/sPACYuLo6zj0+ZMoWRklRqL+u6CxcuJJadP38eY8aMMXlYZ8eOHZg4cSLa2to4y93d3fHoo49aZWcHAQEBGDKEe2W5sLAQixcv5vxujUYjnnvuORQVFXHeGxMTI9jy7WOPPUZ8Q7W2tiIpKQm7du2i1lFQUIAxY8bg4sWLxGtof7vuhNSXpVIpv+jQPYW3tzdGjx7NWdbW1obZs2fz3qs4deoU3niDnDxkxowZFtl4N8HBwcSyrVu3UocOOp0OW7duJZZbu1LTAe13XbFiBU6fPs2rHq1Wi9mzZxN/p9/85je8kt31JHYtANqu6RdffGH2Mc3U1FSii+y4cePg7MwrzTEVX19fYhmfoAL5+fkW1c0XFxcX4qZVeXk5PvnEvKyj+fn5+PLLLznLZDIZEhMTzTWxW7FrAfTv359YtmPHDrPra2trw969eznLFArFHd8ca6C5j3h5eZm8n3aNEK4pQUFBxB3mPXv2WLT8Tftb0P6G9oBdCyAggPOMPgBQx540aIG9aO3xpaysjFjGZ19g2rRpxLLS0lKLbOpMYGAgscxev1NbYtcCoLkBWOqnREvk8cADD1hUZ2cKCwtRXV3NWfbb3/4Ws2fPJt47Z84cJCQkcJZVVlYSJ8jmQFuetcV3yrX+bk/YtQBoJ3osHQ/7+/sTy+rr6y2qszMGgwHp6enE8m3btmHJkiVdhiFyuRzLli2jToDT09MFOZpK+x1t8Z3a+3lyuxYAbThhqfch7b7i4mKL6rybDRs2EE8iOTo64uOPP0Z5eTkOHjyIgwcPory8HB9++CEx8RvDMNiwYYMgttGiJdjiO6X9De0BuxYAbWz5zDPPmF1fv379iKsSVVVVuHbtmtl1cnHmzBl888031Gu8vLzwyCOP4JFHHjE5Od6+fTt+/vlnQWy7du0a8SBLUlISdRmXBO1vYem8oruwawEcO3aMOL4cN24c5s6dy7suiUSCDz74gPiUzcjIsMhGEkuWLBHkjXLz5k1BncsYhkFmZiZnmZOTE95//32zXLcfe+wxjBkzhrOsvr7epCtKT2PXAtDpdDhw4ACxfPPmzcSNss5IJBKsXLkSs2YR0x5gy5YtFtlIoqKiAjNmzLAqe2Z9fT1mzJgheGQO2u86Z84crFy5kpcIHn74YXz22WfE8v3791PTmdoDdi0AAFizZg1xK1upVOLIkSN4+eWXiU/2fv36YefOnd
Rd4HPnzhGfitaQnZ2NuLg4XLlyxex7CwoKMHr0aJscjczMzMT58+eJ5W+88QZ27txJ3Hl2cnLCq6++isOHD8PV1ZXzGoPBgDVr1ghiry2x7mRJN3DhwgVs27YNTz3FnbbA0dER7733Hl566SXs3bsXOTk5qKurg7+/P8aNG4fExERqVnGAdVEgTVqtJT8/H7/+9a/x6quv4oUXXiB2mA6amprw/vvv47333rPZkVSj0YitW7dST9XNmjULU6dORUZGBk6cOIHy8nJ4eHggJiYG06dPN7litHXr1l6RTEWSnJzM+ZcvKyvD2bNnu9seTvz8/PDzzz8LslPLRXNzM6ZMmUJ1SRYCtVqN6dOnY9q0aRg4cGCXqBB5eXlIS0vD3r17ifsIQpGQkIC0tDRBD8J0pri4GHFxcXazBDpixAjihlyvEADAnjs9efKkySeopTQ3NyM5ORlHjhyxSf32wpgxY3DgwAEolUqb1N/S0oJx48YJtmolBDQB2P0coINz585h1qxZVk0qabi4uCAtLQ3jx4+3Sf32QEJCAr777jubdf6GhgbMnDnTrjq/KXqNAADg0KFDeOihh2yWqsnFxQXp6el9UgTjx4+36bDn6tWrGDVqFA4dOmST+m1FrxIAwGYGj42NxYoVK8x+G5w7dw4vvvgi9fxvXxTB+PHjkZ6eTu38zc3NeOmll6irQ1zU19fjL3/5C4YOHWrRaldP02vmAFx4eXnh0UcfxYwZMxAfH8/p5ltVVYWMjAxs2bIFmZmZYBgG8fHx2L9/v8kOMXXqVBw9etSWv4LN4dv5O35XiUSCxMRE/P73v0diYiK8vb3vuV6n0+H48ePYs2cPvv76a7sJ50iiT0yCTeHs7Izg4GD4+fnBy8sLNTU1KCkpIbo38OkY9fX1GDJkiN37s5AIDAxETk4O1cvVlNDDwsIQGBh45zutqKhAcXExWlpabGW24NAEYPf7AHxpaWnB1atXcfXqVV7XHz16FFOnTqWK4IEHHsDChQvx1ltvCWhp95GSkmKy8ycnJ1PfcoWFhX06QWKvmwMISYcIaHMC2gESe4dm+/2y7GuK+1oAgGkR/PTTT91skXCcOXOG83Ox8/+PPjMEsoYOEezevbvLkOHkyZP44osvBGnDFUAsgDAAIQC8bn8GAE0AagDcAFAA4OLtz6zl888/x5NPPtnFW7O+vh6zZs0SO/9tRAHc5ujRoxgyZAhSUlIQGBiIn376CZ9//rlVEfL6A5gDYAKAYeD/ZesBZAM4DGAngOsWtq/X65GQkICnn34acXFxKC0txcaNG3vtpN4W9JlVIHtBAiAJwBIAD93+2RoYAKcAfAzg+9s/i5hHn3CF6A08DOAYgK8AjIb1nR+363gYwNcAjgAgpvMRsQhRAAKgArAewD4AMTZsJxbAfgCpAGyfNuP+QBSAlQwF+9R/DMI88U0hAfAEgKMAftUN7fV1RAFYwSMADgAI7YG2BwA4BMD6FBz3N+IqkIU8CnYowvcLLAI7hv8RQD6AEgAd+QvdAAQBGAh2jJ8AfqJyBLAZwHNgV4tEzEcUgAVMAr/ObwCwB8AmALTttNrb/y4C2AV2mPNrAM8CmA6AlhfHAcAnADRg3wgi5iEOgcxkKNinrqnOfxTsMuizoHd+LhgAZwA8A3YFyFRgEQcAn0OcE1iCKAAzUIHtaE6Ua1oBPA9gFthdXWu5CmAmgJcAcKf3YHEGK0xxdcg8RAGYwbugj82rAUwBIGyEIZYvbtdN87wPA/CODdruy4gC4MnDYCe+JDo6v/BRfP5HNkyLYB6AB21oQ1+j10+C/f39ERoaCh8fH9TV1eHWrVsoKCgQJJJyBxKwT1bSOn8rWHGQc7sIx1Wwew7pYFeB7kYCYDVY/yMh3SZkMhnCw8Ph6+sLDw8P3Lp1Czdu3EB5ebmArXQ/vVIAPj4+WLJkCWbOnMmZkK66uhrp6enYsGED0SXYHJJA3+F9DbZ98t/NWQCvA/gHoXwoWAF8L0
Bbo0aNQkpKCqZOncqZ7ysnJwfffvstUlNTiUF37Zle5Qwnk8mwfPlyvPbaa9TkGR0wDINvvvkGS5YsIeYG40M6WN8eLo6CnfD2BHsAkAKT/z8A5FwzpgkICEBqaipmzpzJK06oRqPB6tWrsXbtWrvJMNpBn3CGUyqV+Oabb/D3v/+dV+cH2KC4s2fPRlZWFuLi4ixqtz/Y5UwuDACWW1SrMCy/bQMXD4M9d2AJsbGxOH36NH73u9/xjhStUqmwevVqpKen231WmM70CgHI5XLs27cP06dPt+j+wMBAHD58mJi/l8YckMf+eyDMUqel5AFII5RJwNpuLpGRkTh27JjFKVknTZqEtLQ0KBQKi+7vbnqFAD766COr4/S4ubnh22+/5f326GACpexTqywShk2UMu5sY2Tc3d2Rnp5uda60MWPG4B//IM1Q7Au7F0BsbCxSUlIEqSsiIgKvvPIK7+uVYCeUXBQByBLCKCv5EcBNQtlwAObEgVu+fDnCwsKsNwrAc889h+HDhwtSly2xewGsXr0aUinZzMLCQrz11lt44oknsGzZMnz/PX3t48UXX+SdvfxXALgz6rKObfZwOosBawsXCvA/n+Dj44Nly5ZRr8nMzMSyZcvwxBNP4O2336amlJJKpVi5ciXP1nsOu14GVavVSEpKIpZv2rQJixcv7pLc+eOPP8bs2bOxbds2zrwArq6umD59OjZv3myyfdqz8EeTd3cfpwH8gVAWDtavyBTTp08nxkdqa2vDvHnzsHv37i6fv/POO1i/fj2efvppzvsmTpwIT09P1NbW8rCgZ7DrN8CkSZMgk3H7Qh49ehSLFi3izGy+a9cu6lCHT8JqgPW5J9Edm158odlC+x06Q/tOXn755Xs6P8CGSFy4cCExD5iDgwMmTZrE04Kewa4FMGjQIGLZO++8Q11v3rBhAzFmZWRkJK/2ySmlWX9+e4FmC98FSdJ3XVVVRU3RajQa8c47ZA8kvt91T2HXAiBtXgDAjz/SByHt7e3IyuKeptLq7Qwtir5tkhdZhpZSxjcTACnZ9dmzZ02Ghjl9+jSxjO933VPYtQBErKc7zin3ZuxaALQATg89RNqfZZHL5Rg5cqTZ9XaG9pS3TY4Vy6DtbJCjnnaF5NQ2cuRIODjQ10poqWrtPQiXXQsgLy+PWPb6669Tl0f//Oc/EzOw803koKGU2SZdn2XQbOGbbo/0XavVavzpT38i3ieTybBixQpiub0nzbBrARw4cIA4/hw3bhw2btzIueU+Z84crF27lljvvn37eLVPXuVmD7DbCzRbbvCsg/advPfee5g9e/Y9nzs6OuLTTz/F2LHcLnl6vR4HDx7kaUHPYNf7ADU1NcjIyMDkyZM5y5955hlMmDABW7duxdWrV6FWqzFt2jQkJJCdAJqamngLgObnMwrsAXZ7gDYY5Pv83bt3Lz766CPOvQBHR0fs3LkThw8fRlpaGqqrqzFw4EDMnz8f/fv3J9b53Xff2fUeANAL3KFjY2ORnZ1NHe6Yw9tvv8074YUr2LcA127wdQAj0PO7wRIA5wEEc5S1gT3CSTtL3JlVq1bh9ddfF8Quo9GIkSNH2iTTvbn0anfoCxcuYP369YLUlZeXZ5aTVhPIB11CwYYu6WlGgbvzA2w0Cr6dHwDWrl2L/HxhtvhSU1PtovObwu4FALD+O9bGs9doNPjd736HxkbzVvAPU8qetcoiYVhIKfvOzLo0Gg2Sk5NRX19vjUk4efIkXn31Vavq6C56hQDa29sxffp07Nmzx6L7S0tLMWHCBFy+fNnse3eCPMyZjp6dDA8GOTSiHsC3FtSZl5eH+Ph43LjBd/rclQMHDiA5OZnTRcUe6RUCAIDGxkbMmjULr732GjQa2gLl/2AYBtu3b8fw4cOJu8KmuA42Pj8XMgBrLKrVeiS32yb9AQ8CsPQQ6IULFzBq1Cjs2LEDDMNvltPQ0IBXXnkFycnJZudv7klkgwYNeourQKvV2t2Jf4
Zh8MMPP+Czzz5Da2srPD094evre891VVVV2L59OxYtWoSPP/4YTU3WJRyqAXDvIiBLKIBKsBPR7uRZANw+mCyLAVjz12tsbMSuXbtw8OBByGQyBAUFwdXV9Z7rLl68iA0bNuDJJ5/E999/z1sw3UlAQADxIJTdrwKZws/PDyEhIfDz80NdXR3Ky8tx7do1wcOiHAEbn5+LNrDxerIFa5FOHNhcBFxhUQAgA2zoFCGRyWQICwuDn58fPDw8UFFRgRs3blgVbKC76NN5gisqKmz+R2AArAAbHYLLt8YRbAaXKWDj9tiSSLAZaEidvxm2OahvMBjMysPcW+g1c4Ce5jTYjkfCC2z2lhE2tCEOrAg9Kdf8Hfx3f0VEAZjFX0B3j/AC20H/KHC7ErBj/n2gd/7zsI+D+r0JUQBmoAU78WylXOMINmLbHgDk4zz8GQxgL9gVH9Kwp4NBIAfwEuFGFICZXAD7hDeVPXgsgB/AhiwfBfP88iVg/Xu+AHASwG943ucMYDuAMaYuFLlDr58E9wTfgU1L9AnoX6AUbGz/mWDH5UfBziXywYYy6diTdgPrzjAQbMcfD8CysFT/E8FjYMUjQkcUgIXsBHte4HOwnc4UIWAjN/zBdibdoUME82A6u8z9jjgEsoJDYPOFFfZA2xcAtFDKnQH8F+JwyBSiAKzkItgQhP9B97hGN4MNjf5bsMMcUyIQ5wR0RAEIgBbAEgCTYVuXiAywUZ//BTYq9EmIIrAWUQACcgbs22Au2Pj8QrwR9GAjQCeC7ex3b3KJIrAOcRLcicDAQKSkpCAwMBA//fQTNm/ebDImDhff3/4XAtaJbgLYHWJSnNG7aQMrpkMAdgO4ZeL6DhFsB3lCLq4OcdPrneGEYvz48di9e3eX0OAnT55EQkKCRSK4GxewgWojwArDC+yRSynY5dBqsE/3KwAuwbyTXB2MAV0EAPumuN9E0Ked4YRg/PjxSE9Pv+dA+JgxY7BgwQJs2kSLws+PZrBPdeszlpE5CfaNswOsuLgQl0i7ct/PAUidv4Nf/9oeTv7y5zTYOQjtBETHEum4brHIvrmvBWCq8wNASYk9hcHlhygC/vSZIZCzszNCQkLg4+MDLy8v1NbWoqSkBIWF3NtUfDp/XV2dIMOfnqBDBKaGQ//F/T0c6tUCUKvVeOyxxzBjxgyMHTsWcvm96yzV1dXIyMjAli1bkJmZCaPRiPj4eJOdv7m5GbNmzbL72JY0RBGYpleuArm4uOCFF17Aq6++CpWKFsW/K+fPn8eWLVuwatUqk51/6tSpOHr0qBDm9jgPgS4CgF0d6qsi6FOrQFFRUdi3b59FydyGDh2KoUNJae9Y+lrnB8Q3AY1eNQmeOHEiTp8+LVgmw7vpi52/A3FizE2vEcCwYcOwe/dus4Y85tDc3Izk5OQ+2fk7EEVwL71CAP7+/khLS6OO262h48lvbfhFU3h7e+PZZ59FWloarly5gsbGRjQ2NuLKlSvYt28fnn32WXh7e9vUBlEEXekVk+AtW7Zg/vz51GvKysqwd+9e5Obmoq6uDv7+/hg7diySkpLg5OREvfell17CBx98IKTJXXB3d8fy5cuxbNkykyJubm7GunXrsHbtWt4R8CyBz8RYA/aMce9dB2OhTYLtXgCmwqO3trZixYoV+OSTTzjjUQYHB+P999/HnDlziG2cP38ew4cPt0lUs4iICKSlpVEzXnJRWFiIadOmWRTPlC9jwYZ6ofkOrUHPhX8Uil4dHn1N0xW3AAAMqElEQVT58uXEzt/Y2IiEhASsW7eOGIy1uLgYc+fOxapVq4htDB06FImJiYLY25kRI0bg559/NrvzA0BYWBhOnTqF4cOHC25XBycAPA66K7V953i0HrsWgEKhwJQpU4jlCxYsoKbo7Mybb76JXbvIOV1+//vfm20fDT8/P3z77bdwd+ebqfde3N3dkZaWhsDAQAEt64opEfT8GMC22LUA4uPjias+x44do3bou2EYBi
+99BLa2rgdjRMTEyGRCJdU9JNPPkFwMCl1BX8CAgJsOj8B/ieCu2M6nwY7Ge7L2LUAYmJiiGWbN282u76bN28iIyODs8zb25ua78ocRo0ahZkzZ1Kvqa6uxsGDB3Hw4EFUV9NzOc6ZM8fmXqknwE541wLYBuAFsPkPrD8JYd/YtQBoWcZPnDhhUZ20+4R4YgNASkoK8W3S1taGJUuWwN/fH5MnT8bkyZPh7++PpUuXEt9OEokEKSkpgthGoxzAuwCWAvg3+n7nB+zcFcLHx4dYduuWqYOC3NAiSXc+DWYpMpkMU6dOJZbPmzcPu3fv7vKZXq9HamoqysvLsXPnTs77kpOTIZPJBA37LmLnbwDaOrilO8K0Tl5XV2dRnZ0JCwuDWq3mLMvMzLyn83dm165dOHyYOyuZt7c3BgwYYLV9Il2xawHQMtTQ5gc0aPcJkRGHNmxLS0szeT/tGlrdIpZh1wIoKioils2dO9fs+pycnDB9+nTOMp1OJ8jpL9qbiU/S6JqaGovqFrEMuxZARkYGccy7YMEChIeHm1Xf0qVLOXOKAeyyaksLbUuIH7S5SUREhMn7Bw4k5520dN4jQsauBVBVVYVTp7hzNCoUCuzatYuY/OxuHn74YaxcuZJYvnfvXotsvJvi4mJi2fz586FQKIjljo6OVJ+nmzdvWmWbyL1IjUYjdwHB/aC72bhxI7EsNjYWJ0+eNPlkffzxx3Ho0CE4OnKnmKivr8fXX39tlZ0dlJWVIScnh7NswIABWL9+Ped3K5PJsH79eoSGhnLee/HixV6RkM4eIfVlo9EIqcFg4PQAc3CwjxXSr776CufOnSOWx8bGIicnBxs3bsSECRPg6+sLhUKBkJAQPPXUUzhx4gT++9//cqb47GDNmjXUsbe5fPstOUX1008/jSNHjiApKQlKpRJubm6YOHEijhw5gj/+kZxciVanCB2us+IAYDAYjJLExMR2Jyene3p7fX09Tp60j/hhY8eOxZEjRyCTyQSvOz8/H7GxsYKM/ztQq9UoLCwUbNLa0NCA8PBwkzvGItyMHTuW0yerpaVFLzUQZpn28gYA2N3bV155RfB6tVotZs6cKWjnB1g3h9WrVwtW36pVq8TObwWkvmwwGPRSg8HA6UdMm6z1BOvWrcM///lPweprbm7GnDlzkJubK1idnVm7di327dtndT179+61uTNcX4fUlw0GQ7u0vb39bifAOzfZmwiWLFmClJQUq4PVlpaWIj4+HocOHRLIsnsxGo148sknrTpjfOTIEcyfPx+khQoR0zg6OhLnAHq9vk6q0+mIeZWVSqXNDLOUTz/9FBMmTEB2drbZ9xqNRnz55Zd3DqrYGq1Wi4kTJyI1NdWsTmw0GvHRRx9h4sSJ0Gq1NrSw70Nb/NDpdNel7e3txDEA7eae5MSJE4iLi8MTTzyB48ePm3QQq6+vx3/+8x8MHToUCxYs6NYNpfb2dixduhQjRozA/v37qW8vvV6P9PR0DBs2DM8//7wgYdnvd2gPcZ1OlyOJiopaEBYW9jnXBdeuXbPZGFlI1Go1EhMTERYWBl9fX3h4eODWrVsoLi5Gbm4ujh07hvb29p42EwDg6emJyZMnY+DAgQgODgbDMCgpKcHVq1dx4MABXu4SIvyJjo4mnvMoKCh4ShIeHh40ePBgzu1LjUaD48fvpzhhIn2NcePGcS5HMwyD/Pz8QGl+fn5JY2NjK9fNbm5udjcRFhHhi6OjI3EvprGxseXKlStl0ts/XOG6SCKRwMvLy4YmiojYDlrfbWpqugzcdoZraWk5SLqQdipLRMSeofXd1tbW/cBtATAMs4EUFCogIMAmLggiIrZEJpPBz8+Ps4xhGDQ3N38G3BZATk7OzYaGBs7zhw4ODkQfehERe8XPz4+4AdbQ0NBQUFBQDHQ6D6DVaveTKgsKChLcQBERW0KL8KHVau8c/rgjgJaWltdJu5U+Pj52uykmInI3rq6uxMAEBoMB9fX1f+34+Y
4Arly5UlRbW1vKdZNEIjH7+KGISE8RERFBjMtUW1tbUlRUdL3j5y5HZTQazRekSoOCguDsTIsjLCLS8zg7O1NjqWq12i59vIsAcnNz325qauIMTyaVSsW3gIjdExERQYsmrsvNze1yMLzLlQzD6CsrK7eSKg8JCRFDc4jYLSqVCv369SOW19TUfM4wTBcPw3uk0tjYuKS5uZnTc0wikVgckEpExNbExMQQx/7Nzc362traF+/+/B4BFBUVtVZVVRHjjnt6egoWRFZERCj69esHT09PYnlVVdVXxcXF95x95RwsVVdXP9PY2Mgdqhhsrl5xQixiLzg7OyMqKopYrtVq26qrqxdxlXEKoLS0tLm0tPRlknuEQqHAiBEj7CZ2kMj9i0QiwfDhw4m7vgzDoKKi4uXS0tJmrnJiD87Ly/tnZWUlMTinh4eHRbmvRESEZPDgwdShT2VlZcEvv/xCjKZAfYTX1NRM0+l0xNSJ4eHhYsRikR4jICAAYWFhxHKdTsfU1tZyR0O+DVUABQUFOSUlJWtp1wwbNszmyZ1FRO5GrVZj2LBh1GtKSko+zM/Pp+aZlfDJjRsXF3fJ398/mlSu1+tx6tQpNDRwRlgREREUlUqF0aNHE8f9AFBWVnYxKysr1lRdvGax9fX1D2s0Gs5JBMC6TD/44INWpQQVEeGDu7s7Ro0aRe38DQ0NzXV1dWP51MdLAPn5+Zry8nLqfMDR0RGjR48meuGJiFiLWq3G6NGjiVG+AaCtrY0pKyubUlhYyGs4wnsdMy8v7/CNGzdSaLFqOt4E4sRYRGgCAwPx4IMPUmPW6vV6XL9+PSU/P/8Y33p5zQE6ExUV9caAAQP+ZmoPoKioCJcvXxbD+olYhUQiwcCBA6kuzgAbTe/atWt/u3z58ptm1W+uAAAgJiZmU2ho6DOmMqvX1tYiOztb8OjLIvcHzs7OGDFiBDw8PKjXMQyDa9eufZGbm0tOsEDAIgEAQGxs7L+Cg4MXmXoTtLe3Izc3l5o6SETkbvr164eoqCjqZBdgn/zXr1/fnJOT84wl7VgsAACIior6v9DQ0JV8cgnU1tbi0qVL1Ny/IiIqlQoxMTHU3d0Obo/5/3758uU3LG3PKgEAwODBgxeGhIRsUCgU9PEQ2FfVjRs3UFBQIA6LRLrg7OyMiIgI9OvXjzrW76CtrY25fv16Sl5e3iZr2rVaAAAQHh4+PiAgIP2BBx5w4XO90WhEWVkZ8vPz0djYaHX7Ir0XFxcXDBgwACEhIbydKzUaTXN5efm0vLy8w9a2L4gAACAyMtJNpVId9/Pzo+9Pd4JhGFRVVaGkpAQVFRUmw5yL9A06glYFBQXB29ub1xO/g9LS0sv19fWj+a7zm0IwAXQQHR39flBQ0At8hkSd0ev1KC8vR2VlJaqrq6HTcWZuEumlKBQKqNVq+Pj4wN/f3+wcdDqdjikuLn4/NzdX0GRxggsAACIiIoZ4eXnt8fHxsfgUvUajQXV1NbRaLRobG9HY2CiKopegUCigVCqhVCqhUqng5eVl8Vny2/78N2tqapKvXbt2UWBTbSOADqKiop7z8/P7h1KpdBKivvb2duh0Ouj1erS3t0Ov14sbbT2MVCqFg4MD5HI5HBwcoFAoTC5d8kWr1baVl5cvv3LlykeCVMiBTQUAAMHBwc5eXl6b1Gr1oy4uLvaTe1XEbmlubm6vqqrartFoFhYVFXHmrhAKmwugg4iICEe5XP6ur6/vIjc3N0HeCCJ9i6amJl1lZeXOqqqqlIqKiqbuaLPbBHCnQYnEYciQIW+qVKo/enh4BIqh1+9vDAYDamtrS7Ra7Re5ubkr747bY2u6XQCdGTx4cKijo+Pf3N3dk1Uqlbs5y2EivReGYaDRaBoaGhr2tbW1vfnLL79c7ylbelQAnQkPD/dxcXH5g5OT00ylUvkrV1dXF1EQfYeWlpZ2rVZb0Nra+l1bW9vHPdnpO2M3AribyMjIALlcPkEqlY6Sy+Uxjo6OoQ
4ODg/IZDK5TCaTOTg4OMhkMokYmqVnMRqNMBgMjF6v1xtY2vV6fX1bW9t1vV5/UafTnTEajYevXLlS1tO2cvH/AfuGwjT6lpqBAAAAAElFTkSuQmCC @@ -81,8 +94,6 @@ spec: - "" resources: - configmaps - - persistentvolumeclaims - - persistentvolumes - secrets - services verbs: @@ -90,28 +101,29 @@ spec: - delete - get - list + - patch - update - watch - apiGroups: - - apps + - "" resources: - - deployments + - persistentvolumeclaims + - persistentvolumes verbs: - - create - - delete - get - list - - update - watch - apiGroups: - apps resources: + - deployments - statefulsets verbs: - create - delete - get - list + - patch - update - watch - apiGroups: @@ -144,11 +156,13 @@ spec: - route.openshift.io resources: - routes + - routes/custom-host verbs: - create - delete - get - list + - patch - update - watch - apiGroups: @@ -235,12 +249,12 @@ spec: fieldRef: fieldPath: metadata.name - name: RELATED_IMAGE_backstage - value: registry-proxy.engineering.redhat.com/rh-osbs/rhdh-rhdh-hub-rhel9:1.2 + value: registry-proxy.engineering.redhat.com/rh-osbs/rhdh-rhdh-hub-rhel9:1.3 - name: RELATED_IMAGE_postgresql value: registry.redhat.io/rhel9/postgresql-15:latest command: - /manager - image: registry-proxy.engineering.redhat.com/rh-osbs/rhdh-rhdh-rhel9-operator:1.2 + image: registry-proxy.engineering.redhat.com/rh-osbs/rhdh-rhdh-rhel9-operator:1.3 livenessProbe: httpGet: path: /healthz @@ -261,7 +275,7 @@ spec: memory: 1Gi requests: cpu: 10m - memory: 64Mi + memory: 128Mi securityContext: allowPrivilegeEscalation: false capabilities: @@ -337,10 +351,10 @@ spec: maintainers: - email: asoro@redhat.com name: Armel Soro + - email: cdaley@redhat.com + name: Corey Daley - email: gazarenk@redhat.com name: Gennady Azarenkov - - email: jianrzha@redhat.com - name: Jianrong Zhang - email: nboldt@redhat.com name: Nick Boldt maturity: alpha @@ -348,5 +362,5 @@ spec: provider: name: Red Hat Inc. 
url: https://www.redhat.com/ - version: 1.2.0 + version: 1.3.0 replaces: rhdh-operator.v1.1.1 diff --git a/.rhdh/docker/Dockerfile b/.rhdh/docker/Dockerfile index a5806949..6973aa2e 100644 --- a/.rhdh/docker/Dockerfile +++ b/.rhdh/docker/Dockerfile @@ -13,7 +13,7 @@ # limitations under the License. #@follow_tag(registry.redhat.io/rhel9/go-toolset:latest) -FROM registry.access.redhat.com/ubi9/go-toolset:1.20.12-3.1712567214 AS builder +FROM registry.access.redhat.com/ubi9/go-toolset:1.21.11-2 AS builder # hadolint ignore=DL3002 USER 0 ENV GOPATH=/go/ @@ -54,7 +54,7 @@ RUN export ARCH="$(uname -m)" && if [[ ${ARCH} == "x86_64" ]]; then export ARCH= # Install openssl for FIPS support #@follow_tag(registry.redhat.io/ubi9/ubi-minimal:latest) -FROM registry.access.redhat.com/ubi9-minimal:9.3-1612 AS runtime +FROM registry.access.redhat.com/ubi9-minimal:9.4-1134 AS runtime RUN microdnf update --setopt=install_weak_deps=0 -y && microdnf install -y openssl; microdnf clean -y all # Upstream sources diff --git a/.rhdh/docs/installing-ci-builds.adoc b/.rhdh/docs/installing-ci-builds.adoc index 0b0b881d..9b2145fb 100644 --- a/.rhdh/docs/installing-ci-builds.adoc +++ b/.rhdh/docs/installing-ci-builds.adoc @@ -9,28 +9,19 @@ WARNING: The procedure below will not work properly on OpenShift clusters with h *Procedure* -. Add your Quay token to the cluster global pull secret (link:https://docs.openshift.com/container-platform/4.14/openshift_images/managing_images/using-image-pull-secrets.html#images-update-global-pull-secret_using-image-pull-secrets[link]): +. Run the link:../scripts/install-rhdh-catalog-source.sh[installation script] to create the RHDH Operator CatalogSource in your cluster. By default, it installs the Release Candidate or GA version (from the `1.yy.x` branch), but the `--next` option allows to install the current development build (from the `main` branch). 
For example: + [source,console] ---- -$ oc get secret/pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}' > /tmp/my-global-pull-secret.yaml -$ oc registry login --registry="quay.io" --auth-basic=":" --to=/tmp/my-global-pull-secret.yaml +cd /tmp +curl -sSLO https://raw.githubusercontent.com/janus-idp/operator/main/.rhdh/scripts/install-rhdh-catalog-source.sh +chmod +x install-rhdh-catalog-source.sh -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=/tmp/my-global-pull-secret.yaml +# install catalog source and operator subscription, for the latest stable RC or GA from 1.yy.x branch +./install-rhdh-catalog-source.sh --latest --install-operator rhdh -$ rm -f /tmp/my-global-pull-secret.yaml ----- - -. Run the link:../scripts/install-rhdh-catalog-source.sh[installation script] to create the RHDH Operator CatalogSource in your cluster. By default, it installs the Release Candidate version, but the `--next` option allows to install the current development build (from the `main` branch). For example: -+ -[source,console] ----- -$ cd /tmp -$ curl -sSLO https://raw.githubusercontent.com/janus-idp/operator/main/.rhdh/scripts/install-rhdh-catalog-source.sh -$ chmod +x install-rhdh-catalog-source.sh -$ ./install-rhdh-catalog-source.sh --latest # install only the catalog source -# or -$ ./install-rhdh-catalog-source.sh --latest --install-operator rhdh # install catalog source and operator subscription +# OR, install catalog source and operator subscription, for the next CI build from main branch +./install-rhdh-catalog-source.sh --next --install-operator rhdh ---- . If you did not create a subscription in the previous step, you can do so now. In the *Administrator* perspective of the OpenShift web console, go to *Operators* → *OperatorHub*, search for Red Hat Developer Hub, and install the Red Hat Developer Hub Operator. 
For more info, see link:https://docs.openshift.com/container-platform/4.14/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console]. diff --git a/.rhdh/docs/openshift.adoc b/.rhdh/docs/openshift.adoc index 7d8ad920..98e8c93a 100644 --- a/.rhdh/docs/openshift.adoc +++ b/.rhdh/docs/openshift.adoc @@ -49,8 +49,11 @@ data: title: Red Hat Developer Hub backend: auth: - keys: - - secret: "${BACKEND_SECRET}" + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" baseUrl: https://backstage--. cors: origin: https://backstage--. @@ -72,8 +75,11 @@ data: baseUrl: https://backstage-backstage-sample-my-ns.apps.ci-ln-vtkzr22-72292.origin-ci-int-gce.dev.rhcloud.com backend: auth: - keys: - - secret: "${BACKEND_SECRET}" + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" baseUrl: https://backstage-backstage-sample-my-ns.apps.ci-ln-vtkzr22-72292.origin-ci-int-gce.dev.rhcloud.com cors: origin: https://backstage-backstage-sample-my-ns.apps.ci-ln-vtkzr22-72292.origin-ci-int-gce.dev.rhcloud.com diff --git a/.rhdh/scripts/install-rhdh-catalog-source.sh b/.rhdh/scripts/install-rhdh-catalog-source.sh index 2e5415cb..f92472f2 100755 --- a/.rhdh/scripts/install-rhdh-catalog-source.sh +++ b/.rhdh/scripts/install-rhdh-catalog-source.sh @@ -99,7 +99,24 @@ while [[ "$#" -gt 0 ]]; do shift 1 done +# check if the IIB we're going to install as a catalog source exists before trying to install it +if [[ ! 
$(command -v skopeo) ]]; then + errorf "Please install skopeo 1.11+" + exit 1 +fi + +# shellcheck disable=SC2086 +UPSTREAM_IIB_MANIFEST="$(skopeo inspect docker://${UPSTREAM_IIB} --raw || exit 2)" +# echo "Got: $UPSTREAM_IIB_MANIFEST" +if [[ $UPSTREAM_IIB_MANIFEST == *"Error parsing image name "* ]] || [[ $UPSTREAM_IIB_MANIFEST == *"manifest unknown"* ]]; then + echo "$UPSTREAM_IIB_MANIFEST"; exit 3 +else + echo "[INFO] Using iib from image $UPSTREAM_IIB" + IIB_IMAGE="${UPSTREAM_IIB}" +fi + TMPDIR=$(mktemp -d) +# shellcheck disable=SC2064 trap "rm -fr $TMPDIR" EXIT # Add ImageContentSourcePolicy to resolve references to images not on quay as if from quay.io @@ -167,8 +184,6 @@ spec: source: registry-proxy.engineering.redhat.com " > "$TMPDIR/ImageContentSourcePolicy_${ICSP_URL_PRE}.yml" && oc apply -f "$TMPDIR/ImageContentSourcePolicy_${ICSP_URL_PRE}.yml" -echo "[INFO] Using iib from image $UPSTREAM_IIB" -IIB_IMAGE="${UPSTREAM_IIB}" CATALOGSOURCE_NAME="${TO_INSTALL}-${OLM_CHANNEL}" DISPLAY_NAME_SUFFIX="${TO_INSTALL}" @@ -191,7 +206,7 @@ spec: image: ${IIB_IMAGE} publisher: IIB testing ${DISPLAY_NAME_SUFFIX} displayName: IIB testing catalog ${DISPLAY_NAME_SUFFIX} -" > $TMPDIR/CatalogSource.yml && oc apply -f $TMPDIR/CatalogSource.yml +" > "$TMPDIR"/CatalogSource.yml && oc apply -f "$TMPDIR"/CatalogSource.yml if [ -z "$TO_INSTALL" ]; then echo "Done. Now log into the OCP web console as an admin, then go to Operators > OperatorHub, search for Red Hat Developer Hub, and install the Red Hat Developer Hub Operator." 
@@ -205,7 +220,7 @@ kind: OperatorGroup metadata: name: rhdh-operator-group namespace: ${NAMESPACE_SUBSCRIPTION} -" > $TMPDIR/OperatorGroup.yml && oc apply -f $TMPDIR/OperatorGroup.yml +" > "$TMPDIR"/OperatorGroup.yml && oc apply -f "$TMPDIR"/OperatorGroup.yml # Create subscription for operator echo "apiVersion: operators.coreos.com/v1alpha1 @@ -219,4 +234,4 @@ spec: name: $TO_INSTALL source: ${CATALOGSOURCE_NAME} sourceNamespace: ${NAMESPACE_CATALOGSOURCE} -" > $TMPDIR/Subscription.yml && oc apply -f $TMPDIR/Subscription.yml +" > "$TMPDIR"/Subscription.yml && oc apply -f "$TMPDIR"/Subscription.yml diff --git a/Makefile b/Makefile index 2ee5b04a..d623e29f 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.2.0 +VERSION ?= 0.3.0 # Using docker or podman to build and push images CONTAINER_ENGINE ?= docker @@ -134,7 +134,7 @@ test: manifests generate fmt vet envtest ## Run tests. We need LOCALBIN=$(LOCALB .PHONY: integration-test integration-test: ginkgo manifests generate fmt vet envtest ## Run integration_tests. 
We need LOCALBIN=$(LOCALBIN) to get correct default-config path mkdir -p $(LOCALBIN)/default-config && cp config/manager/$(CONF_DIR)/* $(LOCALBIN)/default-config - LOCALBIN=$(LOCALBIN) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" $(GINKGO) -v -r integration_tests + LOCALBIN=$(LOCALBIN) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" $(GINKGO) -v -r $(ARGS) integration_tests ##@ Build @@ -363,25 +363,53 @@ release-build: bundle image-build bundle-build catalog-build ## Build operator, .PHONY: release-push: image-push bundle-push catalog-push ## Push operator, bundle + catalog images +# It has to be the same namespace as ./config/default/kustomization.yaml -> namespace +OPERATOR_NAMESPACE ?= backstage-system +OLM_NAMESPACE ?= olm +OPENSHIFT_OLM_NAMESPACE = openshift-marketplace + .PHONY: deploy-olm deploy-olm: ## Deploy the operator with OLM - kubectl apply -f config/samples/catalog-operator-group.yaml - sed "s/{{VERSION}}/$(subst /,\/,$(VERSION))/g" config/samples/catalog-subscription-template.yaml | sed "s/{{DEFAULT_OLM_NAMESPACE}}/$(subst /,\/,$(DEFAULT_OLM_NAMESPACE))/g" | kubectl apply -f - + kubectl -n ${OPERATOR_NAMESPACE} apply -f config/samples/catalog-operator-group.yaml + sed "s/{{VERSION}}/$(subst /,\/,$(VERSION))/g" config/samples/catalog-subscription-template.yaml | sed "s/{{OLM_NAMESPACE}}/$(subst /,\/,$(OLM_NAMESPACE))/g" | kubectl -n ${OPERATOR_NAMESPACE} apply -f - + +.PHONY: deploy-olm-openshift +deploy-olm-openshift: ## Deploy the operator with OLM + kubectl -n ${OPERATOR_NAMESPACE} apply -f config/samples/catalog-operator-group.yaml + sed "s/{{VERSION}}/$(subst /,\/,$(VERSION))/g" config/samples/catalog-subscription-template.yaml | sed "s/{{OLM_NAMESPACE}}/$(subst /,\/,$(OPENSHIFT_OLM_NAMESPACE))/g" | kubectl -n ${OPERATOR_NAMESPACE} apply -f - + .PHONY: undeploy-olm undeploy-olm: ## Un-deploy the operator with OLM - -kubectl delete 
subscriptions.operators.coreos.com backstage-operator - -kubectl delete operatorgroup backstage-operator-group - -kubectl delete clusterserviceversion backstage-operator.$(VERSION) + -kubectl -n ${OPERATOR_NAMESPACE} delete subscriptions.operators.coreos.com backstage-operator + -kubectl -n ${OPERATOR_NAMESPACE} delete operatorgroup backstage-operator-group + -kubectl -n ${OPERATOR_NAMESPACE} delete clusterserviceversion backstage-operator.v$(VERSION) -DEFAULT_OLM_NAMESPACE ?= openshift-marketplace .PHONY: catalog-update catalog-update: ## Update catalog source in the default namespace for catalogsource - -kubectl delete catalogsource backstage-operator -n $(DEFAULT_OLM_NAMESPACE) - sed "s/{{CATALOG_IMG}}/$(subst /,\/,$(CATALOG_IMG))/g" config/samples/catalog-source-template.yaml | kubectl apply -n $(DEFAULT_OLM_NAMESPACE) -f - + -kubectl delete catalogsource backstage-operator -n $(OLM_NAMESPACE) + sed "s/{{CATALOG_IMG}}/$(subst /,\/,$(CATALOG_IMG))/g" config/samples/catalog-source-template.yaml | kubectl apply -n $(OLM_NAMESPACE) -f - + +.PHONY: catalog-update +catalog-update-openshift: ## Update catalog source in the default namespace for catalogsource + -kubectl delete catalogsource backstage-operator -n $(OLM_NAMESPACE) + sed "s/{{CATALOG_IMG}}/$(subst /,\/,$(CATALOG_IMG))/g" config/samples/catalog-source-template.yaml | kubectl apply -n $(OPENSHIFT_OLM_NAMESPACE) -f - + +# Deploy on Openshift cluster using OLM (by default installed on Openshift) .PHONY: deploy-openshift -deploy-openshift: release-build release-push catalog-update ## Deploy the operator on openshift cluster +deploy-openshift: release-build release-push catalog-update-openshift create-operator-namespace deploy-olm-openshift ## Deploy the operator on openshift cluster + +.PHONY: install-olm +install-olm: operator-sdk + $(OPSDK) olm install + +.PHONY: create-operator-namespace +create-operator-namespace: + -kubectl create namespace ${OPERATOR_NAMESPACE} + +.PHONY: deploy-k8s-olm +deploy-k8s-olm: 
release-build release-push catalog-update create-operator-namespace deploy-olm ## Deploy the operator on openshift cluster # After this time, Ginkgo will emit progress reports, so we can get visibility into long-running tests. POLL_PROGRESS_INTERVAL := 120s @@ -406,7 +434,11 @@ $(GINKGO): $(LOCALBIN) .PHONY: test-e2e test-e2e: ginkgo ## Run end-to-end tests. See the 'tests/e2e/README.md' file for more details. - $(GINKGO) $(GINKGO_FLAGS) tests/e2e + $(GINKGO) $(GINKGO_FLAGS) --skip-file=e2e_upgrade_test.go tests/e2e + +.PHONY: test-e2e-upgrade +test-e2e-upgrade: ginkgo ## Run end-to-end tests dedicated to the operator upgrade paths. See the 'tests/e2e/README.md' file for more details. + $(GINKGO) $(GINKGO_FLAGS) --focus-file=e2e_upgrade_test.go tests/e2e show-img: @echo -n $(IMG) diff --git a/OWNERS b/OWNERS index 25958e38..f2b270d4 100644 --- a/OWNERS +++ b/OWNERS @@ -1,7 +1,7 @@ # if adding/removing approvers, remember to update pr-container-build.yaml to add/remove them from the authorize job approvers: + - coreydaley - gazarenkov - - jianrongzhang89 - kadel - nickboldt - rm3l diff --git a/PROJECT b/PROJECT index b119bbc8..db3c7202 100644 --- a/PROJECT +++ b/PROJECT @@ -13,6 +13,6 @@ resources: controller: true domain: rhdh.redhat.com kind: Backstage - path: redhat-developer/red-hat-developer-hub-operator/api/v1alpha1 - version: v1alpha1 + path: redhat-developer/red-hat-developer-hub-operator/api/v1alpha2 + version: v1alpha2 version: "3" diff --git a/README.md b/README.md index 64080d3c..e3cd3342 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,9 @@ Output: - [Developer Guide](docs/developer.md) - [Operator Design](docs/developer.md) +## Telemetry data collection + +The telemetry data collection feature is enabled by default. The default configuration uses image with backstage-plugin-analytics-provider-segment plugin enabled and configured. 
To disable this and to learn what data is being collected, see https://github.com/janus-idp/backstage-showcase/blob/main/showcase-docs/getting-started.md#telemetry-collection ## License diff --git a/api/v1alpha1/backstage_types.go b/api/v1alpha1/backstage_types.go index 5fd7d794..61332c55 100644 --- a/api/v1alpha1/backstage_types.go +++ b/api/v1alpha1/backstage_types.go @@ -197,6 +197,7 @@ type BackstageStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status +//+kubebuilder:deprecatedversion:warning="Since 1.3.0 spec.application.image, spec.application.replicas, spec.application.imagePullSecrets are deprecated in favor of corresponding spec.deployment fields" // Backstage is the Schema for the backstages API type Backstage struct { @@ -259,6 +260,8 @@ type TLS struct { // chain. Do not include a CA certificate. The secret referenced should // be present in the same namespace as that of the Route. // Forbidden when `certificate` is set. + // Note that securing Routes with external certificates in TLS secrets is a Technology Preview feature in OpenShift, + // and requires enabling the `RouteExternalCertificate` OpenShift Feature Gate and might not be functionally complete. // +optional ExternalCertificateSecretName string `json:"externalCertificateSecretName,omitempty"` diff --git a/api/v1alpha2/backstage_types.go b/api/v1alpha2/backstage_types.go new file mode 100644 index 00000000..d94976a5 --- /dev/null +++ b/api/v1alpha2/backstage_types.go @@ -0,0 +1,312 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +type BackstageConditionReason string + +type BackstageConditionType string + +const ( + BackstageConditionTypeDeployed BackstageConditionType = "Deployed" + + BackstageConditionReasonDeployed BackstageConditionReason = "Deployed" + BackstageConditionReasonFailed BackstageConditionReason = "DeployFailed" + BackstageConditionReasonInProgress BackstageConditionReason = "DeployInProgress" +) + +// BackstageSpec defines the desired state of Backstage +type BackstageSpec struct { + // Configuration for Backstage. Optional. + Application *Application `json:"application,omitempty"` + + // Raw Runtime RuntimeObjects configuration. For Advanced scenarios. + RawRuntimeConfig *RuntimeConfig `json:"rawRuntimeConfig,omitempty"` + + // Configuration for database access. Optional. + Database *Database `json:"database,omitempty"` + + // Valid fragment of Deployment to be merged with default/raw configuration. + // Set the Deployment's metadata and|or spec fields you want to override or add. + // Optional. + // +kubebuilder:pruning:PreserveUnknownFields + Deployment *BackstageDeployment `json:"deployment,omitempty"` +} + +type BackstageDeployment struct { + // Valid fragment of Deployment to be merged with default/raw configuration. + // Set the Deployment's metadata and|or spec fields you want to override or add. + // Optional. 
+ // +kubebuilder:pruning:PreserveUnknownFields + Patch *apiextensionsv1.JSON `json:"patch,omitempty"` +} + +type RuntimeConfig struct { + // Name of ConfigMap containing Backstage runtime objects configuration + BackstageConfigName string `json:"backstageConfig,omitempty"` + // Name of ConfigMap containing LocalDb (PostgreSQL) runtime objects configuration + LocalDbConfigName string `json:"localDbConfig,omitempty"` +} + +type Database struct { + // Control the creation of a local PostgreSQL DB. Set to false if using for example an external Database for Backstage. + // +optional + //+kubebuilder:default=true + EnableLocalDb *bool `json:"enableLocalDb,omitempty"` + + // Name of the secret for database authentication. Optional. + // For a local database deployment (EnableLocalDb=true), a secret will be auto generated if it does not exist. + // The secret shall include information used for the database access. + // An example for PostgreSQL DB access: + // "POSTGRES_PASSWORD": "rl4s3Fh4ng3M4" + // "POSTGRES_PORT": "5432" + // "POSTGRES_USER": "postgres" + // "POSTGRESQL_ADMIN_PASSWORD": "rl4s3Fh4ng3M4" + // "POSTGRES_HOST": "backstage-psql-bs1" # For local database, set to "backstage-psql-". + AuthSecretName string `json:"authSecretName,omitempty"` +} + +type Application struct { + // References to existing app-configs ConfigMap objects, that will be mounted as files in the specified mount path. + // Each element can be a reference to any ConfigMap or Secret, + // and will be mounted inside the main application container under a specified mount directory. + // Additionally, each file will be passed as a `--config /mount/path/to/configmap/key` to the + // main container args in the order of the entries defined in the AppConfigs list. + // But bear in mind that for a single ConfigMap element containing several filenames, + // the order in which those files will be appended to the main container args cannot be guaranteed. 
+ // So if you want to pass multiple app-config files, it is recommended to pass one ConfigMap per app-config file. + // +optional + AppConfig *AppConfig `json:"appConfig,omitempty"` + + // Reference to an existing ConfigMap for Dynamic Plugins. + // A new one will be generated with the default config if not set. + // The ConfigMap object must have an existing key named: 'dynamic-plugins.yaml'. + // +optional + DynamicPluginsConfigMapName string `json:"dynamicPluginsConfigMapName,omitempty"` + + // References to existing Config objects to use as extra config files. + // They will be mounted as files in the specified mount path. + // Each element can be a reference to any ConfigMap or Secret. + // +optional + ExtraFiles *ExtraFiles `json:"extraFiles,omitempty"` + + // Extra environment variables + // +optional + ExtraEnvs *ExtraEnvs `json:"extraEnvs,omitempty"` + + // Number of desired replicas to set in the Backstage Deployment. + // Defaults to 1. + // +optional + //+kubebuilder:default=1 + Replicas *int32 `json:"replicas,omitempty"` + + // Custom image to use in all containers (including Init Containers). + // It is your responsibility to make sure the image is from trusted sources and has been validated for security compliance + // +optional + Image *string `json:"image,omitempty"` + + // Image Pull Secrets to use in all containers (including Init Containers) + // +optional + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + + // Route configuration. Used for OpenShift only. + Route *Route `json:"route,omitempty"` +} + +type AppConfig struct { + // Mount path for all app-config files listed in the ConfigMapRefs field + // +optional + // +kubebuilder:default=/opt/app-root/src + MountPath string `json:"mountPath,omitempty"` + + // List of ConfigMaps storing the app-config files. Will be mounted as files under the MountPath specified. 
+ // For each item in this array, if a key is not specified, it means that all keys in the ConfigMap will be mounted as files. + // Otherwise, only the specified key will be mounted as a file. + // Bear in mind not to put sensitive data in those ConfigMaps. Instead, your app-config content can reference + // environment variables (which you can set with the ExtraEnvs field) and/or include extra files (see the ExtraFiles field). + // More details on https://backstage.io/docs/conf/writing/. + // +optional + ConfigMaps []ObjectKeyRef `json:"configMaps,omitempty"` +} + +type ExtraFiles struct { + // Mount path for all extra configuration files listed in the Items field + // +optional + // +kubebuilder:default=/opt/app-root/src + MountPath string `json:"mountPath,omitempty"` + + // List of references to ConfigMaps objects mounted as extra files under the MountPath specified. + // For each item in this array, if a key is not specified, it means that all keys in the ConfigMap will be mounted as files. + // Otherwise, only the specified key will be mounted as a file. + // +optional + ConfigMaps []ObjectKeyRef `json:"configMaps,omitempty"` + + // List of references to Secrets objects mounted as extra files under the MountPath specified. + // For each item in this array, a key must be specified that will be mounted as a file. + // +optional + Secrets []ObjectKeyRef `json:"secrets,omitempty"` +} + +type ExtraEnvs struct { + // List of references to ConfigMaps objects to inject as additional environment variables. + // For each item in this array, if a key is not specified, it means that all keys in the ConfigMap will be injected as additional environment variables. + // Otherwise, only the specified key will be injected as an additional environment variable. + // +optional + ConfigMaps []ObjectKeyRef `json:"configMaps,omitempty"` + + // List of references to Secrets objects to inject as additional environment variables. 
+ // For each item in this array, if a key is not specified, it means that all keys in the Secret will be injected as additional environment variables. + // Otherwise, only the specified key will be injected as environment variable. + // +optional + Secrets []ObjectKeyRef `json:"secrets,omitempty"` + + // List of name and value pairs to add as environment variables. + // +optional + Envs []Env `json:"envs,omitempty"` +} + +type ObjectKeyRef struct { + // Name of the object + // We support only ConfigMaps and Secrets. + //+kubebuilder:validation:Required + Name string `json:"name"` + + // Key in the object + // +optional + Key string `json:"key,omitempty"` +} + +type Env struct { + // Name of the environment variable + //+kubebuilder:validation:Required + Name string `json:"name"` + + // Value of the environment variable + //+kubebuilder:validation:Required + Value string `json:"value"` +} + +// BackstageStatus defines the observed state of Backstage +type BackstageStatus struct { + // Conditions is the list of conditions describing the state of the runtime + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:storageversion + +// Backstage is the Schema for the backstages API +type Backstage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackstageSpec `json:"spec,omitempty"` + Status BackstageStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// BackstageList contains a list of Backstage +type BackstageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Backstage `json:"items"` +} + +// Route specifies configuration parameters for OpenShift Route for Backstage. +// Only a secured edge route is supported for Backstage. +type Route struct { + // Control the creation of a Route on OpenShift. 
+ // +optional + //+kubebuilder:default=true + Enabled *bool `json:"enabled,omitempty"` + + // Host is an alias/DNS that points to the service. Optional. + // Ignored if Enabled is false. + // If not specified a route name will typically be automatically + // chosen. Must follow DNS952 subdomain conventions. + // +optional + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + + // Subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). + // Ignored if Enabled is false. + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + // +optional + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + Subdomain string `json:"subdomain,omitempty"` + + // The tls field provides the ability to configure certificates for the route. + // Ignored if Enabled is false. + // +optional + TLS *TLS `json:"tls,omitempty"` +} + +type TLS struct { + // certificate provides certificate contents. This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. + Certificate string `json:"certificate,omitempty"` + + // ExternalCertificateSecretName provides certificate contents as a secret reference. + // This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. The secret referenced should + // be present in the same namespace as that of the Route. + // Forbidden when `certificate` is set. 
+ // Note that securing Routes with external certificates in TLS secrets is a Technology Preview feature in OpenShift, + // and requires enabling the `RouteExternalCertificate` OpenShift Feature Gate and might not be functionally complete. + // +optional + ExternalCertificateSecretName string `json:"externalCertificateSecretName,omitempty"` + + // key provides key file contents + Key string `json:"key,omitempty"` + + // caCertificate provides the cert authority certificate contents + CACertificate string `json:"caCertificate,omitempty"` +} + +func init() { + SchemeBuilder.Register(&Backstage{}, &BackstageList{}) +} + +// IsLocalDbEnabled returns true if Local database is configured and enabled +func (s *BackstageSpec) IsLocalDbEnabled() bool { + if s.Database == nil { + return true + } + return ptr.Deref(s.Database.EnableLocalDb, true) +} + +// IsRouteEnabled returns value of Application.Route.Enabled if defined or true by default +func (s *BackstageSpec) IsRouteEnabled() bool { + if s.Application != nil && s.Application.Route != nil { + return ptr.Deref(s.Application.Route.Enabled, true) + } + return true +} + +func (s *BackstageSpec) IsAuthSecretSpecified() bool { + return s.Database != nil && s.Database.AuthSecretName != "" +} diff --git a/api/v1alpha2/groupversion_info.go b/api/v1alpha2/groupversion_info.go new file mode 100644 index 00000000..fb8c4342 --- /dev/null +++ b/api/v1alpha2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2023 Red Hat Inc.. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha2 contains API Schema definitions for the v1alpha2 API group +// +kubebuilder:object:generate=true +// +groupName=rhdh.redhat.com +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "rhdh.redhat.com", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 00000000..280d13d4 --- /dev/null +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,394 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 Red Hat Inc.. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AppConfig) DeepCopyInto(out *AppConfig) { + *out = *in + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ObjectKeyRef, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppConfig. +func (in *AppConfig) DeepCopy() *AppConfig { + if in == nil { + return nil + } + out := new(AppConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Application) DeepCopyInto(out *Application) { + *out = *in + if in.AppConfig != nil { + in, out := &in.AppConfig, &out.AppConfig + *out = new(AppConfig) + (*in).DeepCopyInto(*out) + } + if in.ExtraFiles != nil { + in, out := &in.ExtraFiles, &out.ExtraFiles + *out = new(ExtraFiles) + (*in).DeepCopyInto(*out) + } + if in.ExtraEnvs != nil { + in, out := &in.ExtraEnvs, &out.ExtraEnvs + *out = new(ExtraEnvs) + (*in).DeepCopyInto(*out) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(Route) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application. +func (in *Application) DeepCopy() *Application { + if in == nil { + return nil + } + out := new(Application) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Backstage) DeepCopyInto(out *Backstage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backstage. +func (in *Backstage) DeepCopy() *Backstage { + if in == nil { + return nil + } + out := new(Backstage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backstage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackstageDeployment) DeepCopyInto(out *BackstageDeployment) { + *out = *in + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = new(v1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackstageDeployment. +func (in *BackstageDeployment) DeepCopy() *BackstageDeployment { + if in == nil { + return nil + } + out := new(BackstageDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackstageList) DeepCopyInto(out *BackstageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backstage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackstageList. 
+func (in *BackstageList) DeepCopy() *BackstageList { + if in == nil { + return nil + } + out := new(BackstageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackstageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackstageSpec) DeepCopyInto(out *BackstageSpec) { + *out = *in + if in.Application != nil { + in, out := &in.Application, &out.Application + *out = new(Application) + (*in).DeepCopyInto(*out) + } + if in.RawRuntimeConfig != nil { + in, out := &in.RawRuntimeConfig, &out.RawRuntimeConfig + *out = new(RuntimeConfig) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(Database) + (*in).DeepCopyInto(*out) + } + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(BackstageDeployment) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackstageSpec. +func (in *BackstageSpec) DeepCopy() *BackstageSpec { + if in == nil { + return nil + } + out := new(BackstageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackstageStatus) DeepCopyInto(out *BackstageStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackstageStatus. 
+func (in *BackstageStatus) DeepCopy() *BackstageStatus { + if in == nil { + return nil + } + out := new(BackstageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Database) DeepCopyInto(out *Database) { + *out = *in + if in.EnableLocalDb != nil { + in, out := &in.EnableLocalDb, &out.EnableLocalDb + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. +func (in *Database) DeepCopy() *Database { + if in == nil { + return nil + } + out := new(Database) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Env) DeepCopyInto(out *Env) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Env. +func (in *Env) DeepCopy() *Env { + if in == nil { + return nil + } + out := new(Env) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraEnvs) DeepCopyInto(out *ExtraEnvs) { + *out = *in + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ObjectKeyRef, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectKeyRef, len(*in)) + copy(*out, *in) + } + if in.Envs != nil { + in, out := &in.Envs, &out.Envs + *out = make([]Env, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraEnvs. 
+func (in *ExtraEnvs) DeepCopy() *ExtraEnvs { + if in == nil { + return nil + } + out := new(ExtraEnvs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraFiles) DeepCopyInto(out *ExtraFiles) { + *out = *in + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ObjectKeyRef, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectKeyRef, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraFiles. +func (in *ExtraFiles) DeepCopy() *ExtraFiles { + if in == nil { + return nil + } + out := new(ExtraFiles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectKeyRef) DeepCopyInto(out *ObjectKeyRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectKeyRef. +func (in *ObjectKeyRef) DeepCopy() *ObjectKeyRef { + if in == nil { + return nil + } + out := new(ObjectKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLS) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RuntimeConfig) DeepCopyInto(out *RuntimeConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfig. +func (in *RuntimeConfig) DeepCopy() *RuntimeConfig { + if in == nil { + return nil + } + out := new(RuntimeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLS) DeepCopyInto(out *TLS) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLS. +func (in *TLS) DeepCopy() *TLS { + if in == nil { + return nil + } + out := new(TLS) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/backstage-default-config_v1_configmap.yaml b/bundle/manifests/backstage-default-config_v1_configmap.yaml index 3f3e7b5e..6a87fa42 100644 --- a/bundle/manifests/backstage-default-config_v1_configmap.yaml +++ b/bundle/manifests/backstage-default-config_v1_configmap.yaml @@ -1,6 +1,6 @@ apiVersion: v1 data: - app-config.yaml: |- + app-config.yaml: | apiVersion: v1 kind: ConfigMap metadata: @@ -8,14 +8,13 @@ data: data: default.app-config.yaml: | backend: - database: - connection: - password: ${POSTGRES_PASSWORD} - user: ${POSTGRES_USER} auth: - keys: - # This is a default value, which you should change by providing your own app-config - - secret: "pl4s3Ch4ng3M3" + externalAccess: + - type: legacy + options: + subject: legacy-default-config + # This is a default value, which you should change by providing your own app-config + secret: "pl4s3Ch4ng3M3" db-secret.yaml: |- apiVersion: v1 kind: Secret @@ -28,17 +27,6 @@ data: # POSTGRES_USER: postgres # POSTGRESQL_ADMIN_PASSWORD: admin123 # POSTGRES_HOST: bs1-db-service #placeholder -db-service - db-service-hl.yaml: | - apiVersion: v1 - kind: Service - metadata: - name: backstage-psql-cr1-hl # placeholder for 'backstage-psql--hl' - spec: - selector: - 
rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' - clusterIP: None - ports: - - port: 5432 db-service.yaml: | apiVersion: v1 kind: Service @@ -47,6 +35,7 @@ data: spec: selector: rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' + clusterIP: None ports: - port: 5432 db-statefulset.yaml: |- @@ -65,7 +54,6 @@ data: metadata: labels: rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' - name: backstage-db-cr1 # placeholder for 'backstage-psql-' spec: # fsGroup does not work for Openshift # AKS/EKS does not work w/o it @@ -162,7 +150,7 @@ data: resources: requests: storage: 1Gi - deployment.yaml: |- + deployment.yaml: | apiVersion: apps/v1 kind: Deployment metadata: @@ -198,16 +186,24 @@ data: defaultMode: 420 optional: true secretName: dynamic-plugins-npmrc + - emptyDir: {} + name: npmcacache initContainers: - name: install-dynamic-plugins command: - ./install-dynamic-plugins.sh - /dynamic-plugins-root - image: quay.io/janus-idp/backstage-showcase:latest # will be replaced with the actual image quay.io/janus-idp/backstage-showcase:next + # image will be replaced by the value of the `RELATED_IMAGE_backstage` env var, if set + image: quay.io/janus-idp/backstage-showcase:next imagePullPolicy: IfNotPresent securityContext: runAsNonRoot: true allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL env: - name: NPM_CONFIG_USERCONFIG value: /opt/app-root/src/.npmrc.dynamic-plugins @@ -218,6 +214,8 @@ data: name: dynamic-plugins-npmrc readOnly: true subPath: .npmrc + - mountPath: /opt/app-root/src/.npm/_cacache + name: npmcacache workingDir: /opt/app-root/src resources: requests: @@ -230,12 +228,17 @@ data: containers: - name: backstage-backend # image will be replaced by the value of the `RELATED_IMAGE_backstage` env var, if set - image: quay.io/janus-idp/backstage-showcase:latest + image: quay.io/janus-idp/backstage-showcase:next imagePullPolicy: 
IfNotPresent args: - "--config" - "dynamic-plugins-root/app-config.dynamic-plugins.yaml" securityContext: + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault runAsNonRoot: true allowPrivilegeEscalation: false readinessProbe: diff --git a/bundle/manifests/backstage-operator.clusterserviceversion.yaml b/bundle/manifests/backstage-operator.clusterserviceversion.yaml index a7d72242..59f474c3 100644 --- a/bundle/manifests/backstage-operator.clusterserviceversion.yaml +++ b/bundle/manifests/backstage-operator.clusterserviceversion.yaml @@ -5,7 +5,7 @@ metadata: alm-examples: |- [ { - "apiVersion": "rhdh.redhat.com/v1alpha1", + "apiVersion": "rhdh.redhat.com/v1alpha2", "kind": "Backstage", "metadata": { "labels": { @@ -21,12 +21,12 @@ metadata: } ] capabilities: Seamless Upgrades - createdAt: "2024-04-01T18:15:06Z" + createdAt: "2024-07-25T11:50:13Z" operatorframework.io/suggested-namespace: backstage-system operators.operatorframework.io/builder: operator-sdk-v1.33.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 - skipRange: '>=0.0.1 <0.2.0' - name: backstage-operator.v0.2.0 + skipRange: '>=0.0.1 <0.3.0' + name: backstage-operator.v0.3.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -37,6 +37,11 @@ spec: kind: Backstage name: backstages.rhdh.redhat.com version: v1alpha1 + - description: Backstage is the Schema for the backstages API + displayName: Backstage + kind: Backstage + name: backstages.rhdh.redhat.com + version: v1alpha2 description: Operator to deploy Backstage on Kubernetes displayName: Red Hat Developer Hub Operator icon: @@ -50,6 +55,7 @@ spec: - "" resources: - configmaps + - secrets - services verbs: - create @@ -68,30 +74,10 @@ spec: - get - list - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - patch - - update - apiGroups: - apps resources: - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - apps - 
resources: - statefulsets verbs: - create @@ -227,8 +213,8 @@ spec: - name: RELATED_IMAGE_postgresql value: quay.io/fedora/postgresql-15:latest - name: RELATED_IMAGE_backstage - value: quay.io/janus-idp/backstage-showcase:latest - image: quay.io/janus-idp/operator:0.2.0 + value: quay.io/janus-idp/backstage-showcase:next + image: quay.io/janus-idp/operator:0.3.0 livenessProbe: httpGet: path: /healthz @@ -246,10 +232,10 @@ spec: limits: cpu: 500m ephemeral-storage: 20Mi - memory: 128Mi + memory: 1Gi requests: cpu: 10m - memory: 64Mi + memory: 128Mi securityContext: allowPrivilegeEscalation: false capabilities: @@ -319,10 +305,10 @@ spec: maintainers: - email: asoro@redhat.com name: Armel Soro + - email: cdaley@redhat.com + name: Corey Daley - email: gazarenk@redhat.com name: Gennady Azarenkov - - email: jianrzha@redhat.com - name: Jianrong Zhang maturity: alpha minKubeVersion: 1.25.0 provider: @@ -331,6 +317,6 @@ spec: relatedImages: - image: quay.io/fedora/postgresql-15:latest name: postgresql - - image: quay.io/janus-idp/backstage-showcase:latest + - image: quay.io/janus-idp/backstage-showcase:next name: backstage - version: 0.2.0 + version: 0.3.0 diff --git a/bundle/manifests/rhdh.redhat.com_backstages.yaml b/bundle/manifests/rhdh.redhat.com_backstages.yaml index ebbb9bfb..2ae9d8ee 100644 --- a/bundle/manifests/rhdh.redhat.com_backstages.yaml +++ b/bundle/manifests/rhdh.redhat.com_backstages.yaml @@ -14,7 +14,11 @@ spec: singular: backstage scope: Namespaced versions: - - name: v1alpha1 + - deprecated: true + deprecationWarning: Since 1.3.0 spec.application.image, spec.application.replicas, + spec.application.imagePullSecrets are deprecated in favor of corresponding spec.deployment + fields + name: v1alpha1 schema: openAPIV3Schema: description: Backstage is the Schema for the backstages API @@ -256,7 +260,11 @@ spec: serving certificate, not a certificate chain. Do not include a CA certificate. 
The secret referenced should be present in the same namespace as that of the Route. - Forbidden when `certificate` is set. + Forbidden when `certificate` is set. Note that securing + Routes with external certificates in TLS secrets is + a Technology Preview feature in OpenShift, and requires + enabling the `RouteExternalCertificate` OpenShift Feature + Gate and might not be functionally complete. type: string key: description: key provides key file contents @@ -373,6 +381,384 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1alpha2 + schema: + openAPIV3Schema: + description: Backstage is the Schema for the backstages API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackstageSpec defines the desired state of Backstage + properties: + application: + description: Configuration for Backstage. Optional. + properties: + appConfig: + description: References to existing app-configs ConfigMap objects, + that will be mounted as files in the specified mount path. Each + element can be a reference to any ConfigMap or Secret, and will + be mounted inside the main application container under a specified + mount directory. 
Additionally, each file will be passed as a + `--config /mount/path/to/configmap/key` to the main container + args in the order of the entries defined in the AppConfigs list. + But bear in mind that for a single ConfigMap element containing + several filenames, the order in which those files will be appended + to the main container args cannot be guaranteed. So if you want + to pass multiple app-config files, it is recommended to pass + one ConfigMap per app-config file. + properties: + configMaps: + description: List of ConfigMaps storing the app-config files. + Will be mounted as files under the MountPath specified. + For each item in this array, if a key is not specified, + it means that all keys in the ConfigMap will be mounted + as files. Otherwise, only the specified key will be mounted + as a file. Bear in mind not to put sensitive data in those + ConfigMaps. Instead, your app-config content can reference + environment variables (which you can set with the ExtraEnvs + field) and/or include extra files (see the ExtraFiles field). + More details on https://backstage.io/docs/conf/writing/. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + mountPath: + default: /opt/app-root/src + description: Mount path for all app-config files listed in + the ConfigMapRefs field + type: string + type: object + dynamicPluginsConfigMapName: + description: 'Reference to an existing ConfigMap for Dynamic Plugins. + A new one will be generated with the default config if not set. + The ConfigMap object must have an existing key named: ''dynamic-plugins.yaml''.' + type: string + extraEnvs: + description: Extra environment variables + properties: + configMaps: + description: List of references to ConfigMaps objects to inject + as additional environment variables. 
For each item in this + array, if a key is not specified, it means that all keys + in the ConfigMap will be injected as additional environment + variables. Otherwise, only the specified key will be injected + as an additional environment variable. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + envs: + description: List of name and value pairs to add as environment + variables. + items: + properties: + name: + description: Name of the environment variable + type: string + value: + description: Value of the environment variable + type: string + required: + - name + - value + type: object + type: array + secrets: + description: List of references to Secrets objects to inject + as additional environment variables. For each item in this + array, if a key is not specified, it means that all keys + in the Secret will be injected as additional environment + variables. Otherwise, only the specified key will be injected + as environment variable. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + type: object + extraFiles: + description: References to existing Config objects to use as extra + config files. They will be mounted as files in the specified + mount path. Each element can be a reference to any ConfigMap + or Secret. + properties: + configMaps: + description: List of references to ConfigMaps objects mounted + as extra files under the MountPath specified. For each item + in this array, if a key is not specified, it means that + all keys in the ConfigMap will be mounted as files. Otherwise, + only the specified key will be mounted as a file. 
+ items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + mountPath: + default: /opt/app-root/src + description: Mount path for all extra configuration files + listed in the Items field + type: string + secrets: + description: List of references to Secrets objects mounted + as extra files under the MountPath specified. For each item + in this array, a key must be specified that will be mounted + as a file. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + type: object + image: + description: Custom image to use in all containers (including + Init Containers). It is your responsibility to make sure the + image is from trusted sources and has been validated for security + compliance + type: string + imagePullSecrets: + description: Image Pull Secrets to use in all containers (including + Init Containers) + items: + type: string + type: array + replicas: + default: 1 + description: Number of desired replicas to set in the Backstage + Deployment. Defaults to 1. + format: int32 + type: integer + route: + description: Route configuration. Used for OpenShift only. + properties: + enabled: + default: true + description: Control the creation of a Route on OpenShift. + type: boolean + host: + description: Host is an alias/DNS that points to the service. + Optional. Ignored if Enabled is false. If not specified + a route name will typically be automatically chosen. Must + follow DNS952 subdomain conventions. 
+ maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + subdomain: + description: 'Subdomain is a DNS subdomain that is requested + within the ingress controller''s domain (as a subdomain). + Ignored if Enabled is false. Example: subdomain `frontend` + automatically receives the router subdomain `apps.mycluster.com` + to have a full hostname `frontend.apps.mycluster.com`.' + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + tls: + description: The tls field provides the ability to configure + certificates for the route. Ignored if Enabled is false. + properties: + caCertificate: + description: caCertificate provides the cert authority + certificate contents + type: string + certificate: + description: certificate provides certificate contents. + This should be a single serving certificate, not a certificate + chain. Do not include a CA certificate. + type: string + externalCertificateSecretName: + description: ExternalCertificateSecretName provides certificate + contents as a secret reference. This should be a single + serving certificate, not a certificate chain. Do not + include a CA certificate. The secret referenced should + be present in the same namespace as that of the Route. + Forbidden when `certificate` is set. Note that securing + Routes with external certificates in TLS secrets is + a Technology Preview feature in OpenShift, and requires + enabling the `RouteExternalCertificate` OpenShift Feature + Gate and might not be functionally complete. + type: string + key: + description: key provides key file contents + type: string + type: object + type: object + type: object + database: + description: Configuration for database access. Optional. + properties: + authSecretName: + description: 'Name of the secret for database authentication. 
+ Optional. For a local database deployment (EnableLocalDb=true), + a secret will be auto generated if it does not exist. The secret + shall include information used for the database access. An example + for PostgreSQL DB access: "POSTGRES_PASSWORD": "rl4s3Fh4ng3M4" + "POSTGRES_PORT": "5432" "POSTGRES_USER": "postgres" "POSTGRESQL_ADMIN_PASSWORD": + "rl4s3Fh4ng3M4" "POSTGRES_HOST": "backstage-psql-bs1" # For + local database, set to "backstage-psql-".' + type: string + enableLocalDb: + default: true + description: Control the creation of a local PostgreSQL DB. Set + to false if using for example an external Database for Backstage. + type: boolean + type: object + deployment: + description: Valid fragment of Deployment to be merged with default/raw + configuration. Set the Deployment's metadata and|or spec fields + you want to override or add. Optional. + properties: + patch: + description: Valid fragment of Deployment to be merged with default/raw + configuration. Set the Deployment's metadata and|or spec fields + you want to override or add. Optional. + x-kubernetes-preserve-unknown-fields: true + type: object + x-kubernetes-preserve-unknown-fields: true + rawRuntimeConfig: + description: Raw Runtime RuntimeObjects configuration. For Advanced + scenarios. + properties: + backstageConfig: + description: Name of ConfigMap containing Backstage runtime objects + configuration + type: string + localDbConfig: + description: Name of ConfigMap containing LocalDb (PostgreSQL) + runtime objects configuration + type: string + type: object + type: object + status: + description: BackstageStatus defines the observed state of Backstage + properties: + conditions: + description: Conditions is the list of conditions describing the state + of the runtime + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. 
For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/rhdh.redhat.com_backstages.yaml b/config/crd/bases/rhdh.redhat.com_backstages.yaml index 2b990068..5930c075 100644 --- a/config/crd/bases/rhdh.redhat.com_backstages.yaml +++ b/config/crd/bases/rhdh.redhat.com_backstages.yaml @@ -15,7 +15,11 @@ spec: singular: backstage scope: Namespaced versions: - - name: v1alpha1 + - deprecated: true + deprecationWarning: Since 1.3.0 spec.application.image, spec.application.replicas, + spec.application.imagePullSecrets are deprecated in favor of corresponding spec.deployment + fields + name: v1alpha1 schema: openAPIV3Schema: description: Backstage is the Schema for the backstages API @@ -257,7 +261,11 @@ spec: serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. - Forbidden when `certificate` is set. + Forbidden when `certificate` is set. Note that securing + Routes with external certificates in TLS secrets is + a Technology Preview feature in OpenShift, and requires + enabling the `RouteExternalCertificate` OpenShift Feature + Gate and might not be functionally complete. 
type: string key: description: key provides key file contents @@ -374,6 +382,384 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1alpha2 + schema: + openAPIV3Schema: + description: Backstage is the Schema for the backstages API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackstageSpec defines the desired state of Backstage + properties: + application: + description: Configuration for Backstage. Optional. + properties: + appConfig: + description: References to existing app-configs ConfigMap objects, + that will be mounted as files in the specified mount path. Each + element can be a reference to any ConfigMap or Secret, and will + be mounted inside the main application container under a specified + mount directory. Additionally, each file will be passed as a + `--config /mount/path/to/configmap/key` to the main container + args in the order of the entries defined in the AppConfigs list. + But bear in mind that for a single ConfigMap element containing + several filenames, the order in which those files will be appended + to the main container args cannot be guaranteed. So if you want + to pass multiple app-config files, it is recommended to pass + one ConfigMap per app-config file. 
+ properties: + configMaps: + description: List of ConfigMaps storing the app-config files. + Will be mounted as files under the MountPath specified. + For each item in this array, if a key is not specified, + it means that all keys in the ConfigMap will be mounted + as files. Otherwise, only the specified key will be mounted + as a file. Bear in mind not to put sensitive data in those + ConfigMaps. Instead, your app-config content can reference + environment variables (which you can set with the ExtraEnvs + field) and/or include extra files (see the ExtraFiles field). + More details on https://backstage.io/docs/conf/writing/. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + mountPath: + default: /opt/app-root/src + description: Mount path for all app-config files listed in + the ConfigMapRefs field + type: string + type: object + dynamicPluginsConfigMapName: + description: 'Reference to an existing ConfigMap for Dynamic Plugins. + A new one will be generated with the default config if not set. + The ConfigMap object must have an existing key named: ''dynamic-plugins.yaml''.' + type: string + extraEnvs: + description: Extra environment variables + properties: + configMaps: + description: List of references to ConfigMaps objects to inject + as additional environment variables. For each item in this + array, if a key is not specified, it means that all keys + in the ConfigMap will be injected as additional environment + variables. Otherwise, only the specified key will be injected + as an additional environment variable. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. 
+ type: string + required: + - name + type: object + type: array + envs: + description: List of name and value pairs to add as environment + variables. + items: + properties: + name: + description: Name of the environment variable + type: string + value: + description: Value of the environment variable + type: string + required: + - name + - value + type: object + type: array + secrets: + description: List of references to Secrets objects to inject + as additional environment variables. For each item in this + array, if a key is not specified, it means that all keys + in the Secret will be injected as additional environment + variables. Otherwise, only the specified key will be injected + as environment variable. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + type: object + extraFiles: + description: References to existing Config objects to use as extra + config files. They will be mounted as files in the specified + mount path. Each element can be a reference to any ConfigMap + or Secret. + properties: + configMaps: + description: List of references to ConfigMaps objects mounted + as extra files under the MountPath specified. For each item + in this array, if a key is not specified, it means that + all keys in the ConfigMap will be mounted as files. Otherwise, + only the specified key will be mounted as a file. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. 
+ type: string + required: + - name + type: object + type: array + mountPath: + default: /opt/app-root/src + description: Mount path for all extra configuration files + listed in the Items field + type: string + secrets: + description: List of references to Secrets objects mounted + as extra files under the MountPath specified. For each item + in this array, a key must be specified that will be mounted + as a file. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps + and Secrets. + type: string + required: + - name + type: object + type: array + type: object + image: + description: Custom image to use in all containers (including + Init Containers). It is your responsibility to make sure the + image is from trusted sources and has been validated for security + compliance + type: string + imagePullSecrets: + description: Image Pull Secrets to use in all containers (including + Init Containers) + items: + type: string + type: array + replicas: + default: 1 + description: Number of desired replicas to set in the Backstage + Deployment. Defaults to 1. + format: int32 + type: integer + route: + description: Route configuration. Used for OpenShift only. + properties: + enabled: + default: true + description: Control the creation of a Route on OpenShift. + type: boolean + host: + description: Host is an alias/DNS that points to the service. + Optional. Ignored if Enabled is false. If not specified + a route name will typically be automatically chosen. Must + follow DNS952 subdomain conventions. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + subdomain: + description: 'Subdomain is a DNS subdomain that is requested + within the ingress controller''s domain (as a subdomain). + Ignored if Enabled is false. 
Example: subdomain `frontend` + automatically receives the router subdomain `apps.mycluster.com` + to have a full hostname `frontend.apps.mycluster.com`.' + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + tls: + description: The tls field provides the ability to configure + certificates for the route. Ignored if Enabled is false. + properties: + caCertificate: + description: caCertificate provides the cert authority + certificate contents + type: string + certificate: + description: certificate provides certificate contents. + This should be a single serving certificate, not a certificate + chain. Do not include a CA certificate. + type: string + externalCertificateSecretName: + description: ExternalCertificateSecretName provides certificate + contents as a secret reference. This should be a single + serving certificate, not a certificate chain. Do not + include a CA certificate. The secret referenced should + be present in the same namespace as that of the Route. + Forbidden when `certificate` is set. Note that securing + Routes with external certificates in TLS secrets is + a Technology Preview feature in OpenShift, and requires + enabling the `RouteExternalCertificate` OpenShift Feature + Gate and might not be functionally complete. + type: string + key: + description: key provides key file contents + type: string + type: object + type: object + type: object + database: + description: Configuration for database access. Optional. + properties: + authSecretName: + description: 'Name of the secret for database authentication. + Optional. For a local database deployment (EnableLocalDb=true), + a secret will be auto generated if it does not exist. The secret + shall include information used for the database access. 
An example + for PostgreSQL DB access: "POSTGRES_PASSWORD": "rl4s3Fh4ng3M4" + "POSTGRES_PORT": "5432" "POSTGRES_USER": "postgres" "POSTGRESQL_ADMIN_PASSWORD": + "rl4s3Fh4ng3M4" "POSTGRES_HOST": "backstage-psql-bs1" # For + local database, set to "backstage-psql-".' + type: string + enableLocalDb: + default: true + description: Control the creation of a local PostgreSQL DB. Set + to false if using for example an external Database for Backstage. + type: boolean + type: object + deployment: + description: Valid fragment of Deployment to be merged with default/raw + configuration. Set the Deployment's metadata and|or spec fields + you want to override or add. Optional. + properties: + patch: + description: Valid fragment of Deployment to be merged with default/raw + configuration. Set the Deployment's metadata and|or spec fields + you want to override or add. Optional. + x-kubernetes-preserve-unknown-fields: true + type: object + x-kubernetes-preserve-unknown-fields: true + rawRuntimeConfig: + description: Raw Runtime RuntimeObjects configuration. For Advanced + scenarios. + properties: + backstageConfig: + description: Name of ConfigMap containing Backstage runtime objects + configuration + type: string + localDbConfig: + description: Name of ConfigMap containing LocalDb (PostgreSQL) + runtime objects configuration + type: string + type: object + type: object + status: + description: BackstageStatus defines the observed state of Backstage + properties: + conditions: + description: Conditions is the list of conditions describing the state + of the runtime + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/manager/default-config/app-config.yaml b/config/manager/default-config/app-config.yaml index ccfe93e8..2b4849f0 100644 --- a/config/manager/default-config/app-config.yaml +++ b/config/manager/default-config/app-config.yaml @@ -5,11 +5,10 @@ metadata: data: default.app-config.yaml: | backend: - database: - connection: - password: ${POSTGRES_PASSWORD} - user: ${POSTGRES_USER} auth: - keys: - # This is a default value, which you should change by providing your own app-config - - secret: "pl4s3Ch4ng3M3" \ No newline at end of file + externalAccess: + - type: legacy + options: + subject: legacy-default-config + # This is a default value, which you should change by providing your own app-config + secret: "pl4s3Ch4ng3M3" diff --git a/config/manager/default-config/db-service-hl.yaml b/config/manager/default-config/db-service-hl.yaml deleted file mode 100644 index 74c80816..00000000 --- a/config/manager/default-config/db-service-hl.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: backstage-psql-cr1-hl # placeholder for 'backstage-psql--hl' -spec: - selector: - rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' - clusterIP: None - ports: - - port: 5432 diff --git a/config/manager/default-config/db-service.yaml b/config/manager/default-config/db-service.yaml index 754b849d..4c1b9674 100644 
--- a/config/manager/default-config/db-service.yaml +++ b/config/manager/default-config/db-service.yaml @@ -5,5 +5,6 @@ metadata: spec: selector: rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' + clusterIP: None ports: - port: 5432 diff --git a/config/manager/default-config/db-statefulset.yaml b/config/manager/default-config/db-statefulset.yaml index 2b21d2f3..13e74132 100644 --- a/config/manager/default-config/db-statefulset.yaml +++ b/config/manager/default-config/db-statefulset.yaml @@ -13,7 +13,6 @@ spec: metadata: labels: rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' - name: backstage-db-cr1 # placeholder for 'backstage-psql-' spec: # fsGroup does not work for Openshift # AKS/EKS does not work w/o it diff --git a/config/manager/default-config/deployment.yaml b/config/manager/default-config/deployment.yaml index 20d25f96..b9a9d471 100644 --- a/config/manager/default-config/deployment.yaml +++ b/config/manager/default-config/deployment.yaml @@ -33,16 +33,24 @@ spec: defaultMode: 420 optional: true secretName: dynamic-plugins-npmrc + - emptyDir: {} + name: npmcacache initContainers: - name: install-dynamic-plugins command: - ./install-dynamic-plugins.sh - /dynamic-plugins-root - image: quay.io/janus-idp/backstage-showcase:latest # will be replaced with the actual image quay.io/janus-idp/backstage-showcase:next + # image will be replaced by the value of the `RELATED_IMAGE_backstage` env var, if set + image: quay.io/janus-idp/backstage-showcase:next imagePullPolicy: IfNotPresent securityContext: runAsNonRoot: true allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL env: - name: NPM_CONFIG_USERCONFIG value: /opt/app-root/src/.npmrc.dynamic-plugins @@ -53,6 +61,8 @@ spec: name: dynamic-plugins-npmrc readOnly: true subPath: .npmrc + - mountPath: /opt/app-root/src/.npm/_cacache + name: npmcacache workingDir: /opt/app-root/src resources: requests: @@ -65,12 
+75,17 @@ spec: containers: - name: backstage-backend # image will be replaced by the value of the `RELATED_IMAGE_backstage` env var, if set - image: quay.io/janus-idp/backstage-showcase:latest + image: quay.io/janus-idp/backstage-showcase:next imagePullPolicy: IfNotPresent args: - "--config" - "dynamic-plugins-root/app-config.dynamic-plugins.yaml" securityContext: + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault runAsNonRoot: true allowPrivilegeEscalation: false readinessProbe: @@ -109,4 +124,4 @@ spec: limits: cpu: 1000m memory: 2.5Gi - ephemeral-storage: 5Gi \ No newline at end of file + ephemeral-storage: 5Gi diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index ceec4e07..24d9b578 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -5,7 +5,7 @@ kind: Kustomization images: - name: controller newName: quay.io/janus-idp/operator - newTag: 0.2.0 + newTag: 0.3.0 generatorOptions: disableNameSuffixHash: true @@ -15,7 +15,6 @@ configMapGenerator: - default-config/app-config.yaml - default-config/db-secret.yaml - default-config/db-service.yaml - - default-config/db-service-hl.yaml - default-config/db-statefulset.yaml - default-config/deployment.yaml - default-config/dynamic-plugins.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f11e35fb..e3c4286a 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -38,7 +38,7 @@ spec: spec: # Required because the operator does not work without a Service Account Token automountServiceAccountToken: true # NOSONAR - # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # (user): Uncomment the following code to configure the nodeAffinity expression # according to the platforms which are supported by your solution. # It is considered best practice to support multiple architectures. 
You can # build your manager image using the makefile target docker-buildx. @@ -60,7 +60,7 @@ spec: # - linux securityContext: runAsNonRoot: true - # TODO(user): For common cases that do not require escalating privileges + # (user): For common cases that do not require escalating privileges # it is recommended to ensure that all your Pods/Containers are restrictive. # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted # Please uncomment the following code if your project does NOT have to work on old Kubernetes @@ -76,7 +76,7 @@ spec: - name: RELATED_IMAGE_postgresql value: quay.io/fedora/postgresql-15:latest - name: RELATED_IMAGE_backstage - value: quay.io/janus-idp/backstage-showcase:latest + value: quay.io/janus-idp/backstage-showcase:next image: controller:latest name: manager securityContext: @@ -96,16 +96,14 @@ spec: port: 8081 initialDelaySeconds: 5 periodSeconds: 10 - # TODO(user): Configure the resources accordingly based on the project requirements. 
- # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: limits: cpu: 500m - memory: 128Mi + memory: 1Gi ephemeral-storage: 20Mi requests: cpu: 10m - memory: 64Mi + memory: 128Mi volumeMounts: - mountPath: /default-config name: default-config diff --git a/config/manifests/bases/backstage-operator.clusterserviceversion.yaml b/config/manifests/bases/backstage-operator.clusterserviceversion.yaml index 67c444a8..85f05436 100644 --- a/config/manifests/bases/backstage-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/backstage-operator.clusterserviceversion.yaml @@ -5,8 +5,8 @@ metadata: alm-examples: '[]' capabilities: Seamless Upgrades operatorframework.io/suggested-namespace: backstage-system - skipRange: '>=0.0.1 <0.2.0' - name: backstage-operator.v0.2.0 + skipRange: '>=0.0.1 <0.3.0' + name: backstage-operator.v0.3.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -17,6 +17,11 @@ spec: kind: Backstage name: backstages.rhdh.redhat.com version: v1alpha1 + - description: Backstage is the Schema for the backstages API + displayName: Backstage + kind: Backstage + name: backstages.rhdh.redhat.com + version: v1alpha2 description: Operator to deploy Backstage on Kubernetes displayName: Red Hat Developer Hub Operator icon: @@ -44,13 +49,13 @@ spec: maintainers: - email: asoro@redhat.com name: Armel Soro + - email: cdaley@redhat.com + name: Corey Daley - email: gazarenk@redhat.com name: Gennady Azarenkov - - email: jianrzha@redhat.com - name: Jianrong Zhang maturity: alpha minKubeVersion: 1.25.0 provider: name: Red Hat Inc. 
url: https://www.redhat.com/ - version: 0.2.0 + version: 0.3.0 diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 78c50678..48610f5b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -9,6 +9,7 @@ rules: - "" resources: - configmaps + - secrets - services verbs: - create @@ -27,30 +28,10 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - patch - - update - apiGroups: - apps resources: - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - statefulsets verbs: - create diff --git a/config/samples/_v1alpha1_backstage.yaml b/config/samples/_v1alpha2_backstage.yaml similarity index 90% rename from config/samples/_v1alpha1_backstage.yaml rename to config/samples/_v1alpha2_backstage.yaml index 1b6a82cf..17e804b5 100644 --- a/config/samples/_v1alpha1_backstage.yaml +++ b/config/samples/_v1alpha2_backstage.yaml @@ -1,4 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: labels: diff --git a/config/samples/catalog-subscription-template.yaml b/config/samples/catalog-subscription-template.yaml index 0cf520c3..648994c3 100644 --- a/config/samples/catalog-subscription-template.yaml +++ b/config/samples/catalog-subscription-template.yaml @@ -7,5 +7,5 @@ spec: installPlanApproval: Automatic name: backstage-operator source: backstage-operator - sourceNamespace: openshift-marketplace + sourceNamespace: {{OLM_NAMESPACE}} startingCSV: backstage-operator.v{{VERSION}} \ No newline at end of file diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index f66b0cd5..7dc86ec2 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,4 @@ ## Append samples you want in your CSV to this file as resources ## resources: -- _v1alpha1_backstage.yaml +- _v1alpha2_backstage.yaml 
#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/controllers/backstage_controller.go b/controllers/backstage_controller.go index 5352f6ed..5c14fc6a 100644 --- a/controllers/backstage_controller.go +++ b/controllers/backstage_controller.go @@ -19,6 +19,12 @@ import ( "fmt" "reflect" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -35,7 +41,7 @@ import ( "redhat-developer/red-hat-developer-hub-operator/pkg/model" - bs "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bs "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -44,7 +50,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) -var recNumber = 0 +var watchedConfigSelector = metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: model.ExtConfigSyncLabel, + Values: []string{"true"}, + Operator: metav1.LabelSelectorOpIn, + }, + }, +} // BackstageReconciler reconciles a Backstage object type BackstageReconciler struct { @@ -53,23 +67,16 @@ type BackstageReconciler struct { // If true, Backstage Controller always sync the state of runtime objects created // otherwise, runtime objects can be re-configured independently OwnsRuntime bool - - // Namespace allows to restrict the reconciliation to this particular namespace, - // and ignore requests from other namespaces. - // This is mostly useful for our tests, to overcome a limitation of EnvTest about namespace deletion. 
- Namespace string - + // indicates if current cluster is Openshift IsOpenShift bool } //+kubebuilder:rbac:groups=rhdh.redhat.com,resources=backstages,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=rhdh.redhat.com,resources=backstages/status,verbs=get;update;patch //+kubebuilder:rbac:groups=rhdh.redhat.com,resources=backstages/finalizers,verbs=update -//+kubebuilder:rbac:groups="",resources=configmaps;services,verbs=get;watch;create;update;list;delete;patch +//+kubebuilder:rbac:groups="",resources=configmaps;secrets;services,verbs=get;watch;create;update;list;delete;patch //+kubebuilder:rbac:groups="",resources=persistentvolumes;persistentvolumeclaims,verbs=get;list;watch -//+kubebuilder:rbac:groups="",resources=secrets,verbs=create;delete;patch;update -//+kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;watch;create;update;list;delete;patch -//+kubebuilder:rbac:groups="apps",resources=statefulsets,verbs=get;watch;create;update;list;delete;patch +//+kubebuilder:rbac:groups="apps",resources=deployments;statefulsets,verbs=get;watch;create;update;list;delete;patch //+kubebuilder:rbac:groups="route.openshift.io",resources=routes;routes/custom-host,verbs=get;watch;create;update;list;delete;patch // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -79,16 +86,6 @@ type BackstageReconciler struct { func (r *BackstageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { lg := log.FromContext(ctx) - recNumber = recNumber + 1 - lg.V(1).Info(fmt.Sprintf("starting reconciliation (namespace: %q), number %d", req.NamespacedName, recNumber)) - - // Ignore requests for other namespaces, if specified. - // This is mostly useful for our tests, to overcome a limitation of EnvTest about namespace deletion. 
- // More details on https://book.kubebuilder.io/reference/envtest.html#namespace-usage-limitation - if r.Namespace != "" && req.Namespace != r.Namespace { - return ctrl.Result{}, nil - } - backstage := bs.Backstage{} if err := r.Get(ctx, req.NamespacedName, &backstage); err != nil { if errors.IsNotFound(err) { @@ -184,11 +181,25 @@ func (r *BackstageReconciler) applyObjects(ctx context.Context, objects []model. } if err := r.patchObject(ctx, baseObject, obj); err != nil { - return fmt.Errorf("failed to patch object %s: %w", obj.Object(), err) + lg.V(1).Info( + "failed to patch object => trying to delete it (and losing any custom labels/annotations on it) so it can be recreated upon next reconciliation...", + objDispName(obj), obj.Object().GetName(), + "cause", err, + ) + // Some resources like StatefulSets allow patching a limited set of fields. A FieldValueForbidden error is returned. + // Some other resources like Services do not support updating the primary/secondary clusterIP || ipFamily. A FieldValueInvalid error is returned. + // That's why we are trying to delete them first, taking care of orphaning the dependents so that they can be retained. + // They will be recreated at the next reconciliation. + // If they cannot be recreated at the next reconciliation, the expected error will be returned. + if err = r.Delete(ctx, baseObject, client.PropagationPolicy(metav1.DeletePropagationOrphan)); err != nil { + return fmt.Errorf("failed to delete object %s so it can be recreated: %w", obj.Object(), err) + } + lg.V(1).Info("deleted object. 
If you had set any custom labels/annotations on it manually, you will need to add them again", + objDispName(obj), obj.Object().GetName(), + ) + } else { + lg.V(1).Info("patch object ", objDispName(obj), obj.Object().GetName()) } - - lg.V(1).Info("patch object ", objDispName(obj), obj.Object().GetName()) - } return nil } @@ -282,18 +293,104 @@ func setStatusCondition(backstage *bs.Backstage, condType bs.BackstageConditionT }) } +// requestByLabel returns a request with current Namespace and Backstage Object name taken from label +// or empty request object if label not found +func (r *BackstageReconciler) requestByLabel(ctx context.Context, object client.Object) []reconcile.Request { + + lg := log.FromContext(ctx) + + backstageName := object.GetAnnotations()[model.BackstageNameAnnotation] + if backstageName == "" { + lg.V(1).Info(fmt.Sprintf("warning: %s annotation is not defined for %s, Backstage instances will not be reconciled in this loop", model.BackstageNameAnnotation, object.GetName())) + return []reconcile.Request{} + } + + nn := types.NamespacedName{ + Namespace: object.GetNamespace(), + Name: backstageName, + } + + backstage := bs.Backstage{} + if err := r.Get(ctx, nn, &backstage); err != nil { + if !errors.IsNotFound(err) { + lg.Error(err, "request by label failed, get Backstage ") + } + return []reconcile.Request{} + } + + ec, err := r.preprocessSpec(ctx, backstage) + if err != nil { + lg.Error(err, "request by label failed, preprocess Backstage ") + return []reconcile.Request{} + } + + deploy := &appsv1.Deployment{} + if err := r.Get(ctx, types.NamespacedName{Name: model.DeploymentName(backstage.Name), Namespace: object.GetNamespace()}, deploy); err != nil { + if errors.IsNotFound(err) { + lg.V(1).Info("request by label, deployment not found", "name", model.DeploymentName(backstage.Name)) + } else { + lg.Error(err, "request by label failed, get Deployment ", "error ", err) + } + return []reconcile.Request{} + } + + newHash := ec.GetHash() + oldHash := 
deploy.Spec.Template.ObjectMeta.GetAnnotations()[model.ExtConfigHashAnnotation] + if newHash == oldHash { + lg.V(1).Info("request by label, hash are equal", "hash", newHash) + return []reconcile.Request{} + } + + lg.V(1).Info("enqueuing reconcile for", object.GetObjectKind().GroupVersionKind().Kind, object.GetName(), "new hash: ", newHash, "old hash: ", oldHash) + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: backstage.Name, Namespace: object.GetNamespace()}}} + +} + // SetupWithManager sets up the controller with the Manager. func (r *BackstageReconciler) SetupWithManager(mgr ctrl.Manager) error { - builder := ctrl.NewControllerManagedBy(mgr). - For(&bs.Backstage{}) + pred, err := predicate.LabelSelectorPredicate(watchedConfigSelector) + if err != nil { + return fmt.Errorf("failed to construct the predicate for matching secrets. This should not happen: %w", err) + } - // [GA] do not remove it - //if r.OwnsRuntime { - // builder.Owns(&appsv1.Deployment{}). - // Owns(&corev1.Service{}). - // Owns(&appsv1.StatefulSet{}) - //} + secretMeta := &metav1.PartialObjectMetadata{} + secretMeta.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Secret", + }) + + configMapMeta := &metav1.PartialObjectMetadata{} + configMapMeta.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + }) - return builder.Complete(r) + b := ctrl.NewControllerManagedBy(mgr). + For(&bs.Backstage{}). + WatchesMetadata( + secretMeta, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + return r.requestByLabel(ctx, o) + }), + builder.WithPredicates(pred, predicate.Funcs{ + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { return true }, + //CreateFunc: func(e event.CreateEvent) bool { return true }, + }), + ). 
+ WatchesMetadata( + configMapMeta, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + return r.requestByLabel(ctx, o) + }), + builder.WithPredicates(pred, predicate.Funcs{ + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { return true }, + //CreateFunc: func(e event.CreateEvent) bool { return true }, + })) + + return b.Complete(r) } diff --git a/controllers/mock_client.go b/controllers/mock_client.go new file mode 100644 index 00000000..722e2caf --- /dev/null +++ b/controllers/mock_client.go @@ -0,0 +1,143 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const implementMe = "implement me if needed" + +// Mock K8s go-client with very basic implementation of (some) methods +// to be able to simply test controller logic +type MockClient struct { + objects map[NameKind][]byte +} + +func NewMockClient() MockClient { + return MockClient{ + objects: map[NameKind][]byte{}, + } +} + +type NameKind struct { + Name string + Kind string +} + +func kind(obj runtime.Object) string { + str := reflect.TypeOf(obj).String() + return str[strings.LastIndex(str, ".")+1:] + //return reflect.TypeOf(obj).String() +} + +func (m MockClient) Get(_ context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error { + + if key.Name == "" { + return fmt.Errorf("get: name should not be empty") + } + uobj := m.objects[NameKind{Name: key.Name, Kind: kind(obj)}] + if uobj == nil { + return errors.NewNotFound(schema.GroupResource{Group: "", Resource: kind(obj)}, key.Name) + } + err := json.Unmarshal(uobj, obj) + if err != nil { + return err + } + return nil +} + +func (m MockClient) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { + panic(implementMe) +} + +func (m MockClient) Create(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + if obj.GetName() == "" { + return fmt.Errorf("update: object Name should not be empty") + } + uobj := m.objects[NameKind{Name: obj.GetName(), Kind: kind(obj)}] + if uobj != nil { + return errors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: kind(obj)}, obj.GetName()) + } + dat, err := json.Marshal(obj) + if err != nil { + return err + } + m.objects[NameKind{Name: obj.GetName(), Kind: kind(obj)}] = dat + return nil +} + +func (m 
MockClient) Delete(_ context.Context, _ client.Object, _ ...client.DeleteOption) error { + panic(implementMe) +} + +func (m MockClient) Update(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { + + if obj.GetName() == "" { + return fmt.Errorf("update: object Name should not be empty") + } + uobj := m.objects[NameKind{Name: obj.GetName(), Kind: kind(obj)}] + if uobj == nil { + return errors.NewNotFound(schema.GroupResource{Group: "", Resource: kind(obj)}, obj.GetName()) + } + dat, err := json.Marshal(obj) + if err != nil { + return err + } + m.objects[NameKind{Name: obj.GetName(), Kind: kind(obj)}] = dat + return nil +} + +func (m MockClient) Patch(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + panic(implementMe) +} + +func (m MockClient) DeleteAllOf(_ context.Context, _ client.Object, _ ...client.DeleteAllOfOption) error { + panic(implementMe) +} + +func (m MockClient) Status() client.SubResourceWriter { + panic(implementMe) +} + +func (m MockClient) SubResource(_ string) client.SubResourceClient { + panic(implementMe) +} + +func (m MockClient) Scheme() *runtime.Scheme { + panic(implementMe) +} + +func (m MockClient) RESTMapper() meta.RESTMapper { + panic(implementMe) +} + +func (m MockClient) GroupVersionKindFor(_ runtime.Object) (schema.GroupVersionKind, error) { + panic(implementMe) +} + +func (m MockClient) IsObjectNamespaced(_ runtime.Object) (bool, error) { + panic(implementMe) +} diff --git a/controllers/preprocessor_test.go b/controllers/preprocessor_test.go new file mode 100644 index 00000000..9b7ed275 --- /dev/null +++ b/controllers/preprocessor_test.go @@ -0,0 +1,100 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + "os" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" + "redhat-developer/red-hat-developer-hub-operator/pkg/model" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func updateConfigMap(t *testing.T) BackstageReconciler { + ctx := context.TODO() + + bs := v1alpha2.Backstage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bs1", + Namespace: "ns1", + }, + Spec: v1alpha2.BackstageSpec{ + Application: &v1alpha2.Application{ + AppConfig: &v1alpha2.AppConfig{ + ConfigMaps: []v1alpha2.ObjectKeyRef{{Name: "cm1"}}, + }, + }, + }, + } + + cm := corev1.ConfigMap{} + cm.Name = "cm1" + + rc := BackstageReconciler{ + Client: NewMockClient(), + } + + assert.NoError(t, rc.Create(ctx, &cm)) + + // reconcile + extConf, err := rc.preprocessSpec(ctx, bs) + assert.NoError(t, err) + + assert.NotNil(t, extConf.AppConfigs["cm1"].Labels) + assert.Equal(t, 1, len(extConf.AppConfigs["cm1"].Labels)) + oldHash := extConf.GetHash() + + // Update ConfigMap with new data + err = rc.Get(ctx, types.NamespacedName{Namespace: "ns1", Name: "cm1"}, &cm) + assert.NoError(t, err) + cm.Data = map[string]string{"key": "value"} + err = rc.Update(ctx, &cm) + assert.NoError(t, err) + + // reconcile again + extConf, err = rc.preprocessSpec(ctx, bs) + assert.NoError(t, err) + + assert.NotEqual(t, oldHash, extConf.GetHash()) + + return rc +} + +func TestExtConfigChanged(t *testing.T) { + + ctx := context.TODO() + cm 
:= corev1.ConfigMap{} + + rc := updateConfigMap(t) + err := rc.Get(ctx, types.NamespacedName{Namespace: "ns1", Name: "cm1"}, &cm) + assert.NoError(t, err) + // true : Backstage will be reconciled + assert.Equal(t, "true", cm.Labels[model.ExtConfigSyncLabel]) + + err = os.Setenv(AutoSyncEnvVar, "false") + assert.NoError(t, err) + + rc = updateConfigMap(t) + err = rc.Get(ctx, types.NamespacedName{Namespace: "ns1", Name: "cm1"}, &cm) + assert.NoError(t, err) + // false : Backstage will not be reconciled + assert.Equal(t, "false", cm.Labels[model.ExtConfigSyncLabel]) + +} diff --git a/controllers/spec_preprocessor.go b/controllers/spec_preprocessor.go index 37c5fbcc..fd52f545 100644 --- a/controllers/spec_preprocessor.go +++ b/controllers/spec_preprocessor.go @@ -17,8 +17,16 @@ package controller import ( "context" "fmt" + "os" + "strconv" - bs "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + + "sigs.k8s.io/controller-runtime/pkg/log" + + "sigs.k8s.io/controller-runtime/pkg/client" + + bs "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/model" @@ -26,6 +34,8 @@ import ( "k8s.io/apimachinery/pkg/types" ) +const AutoSyncEnvVar = "EXT_CONF_SYNC_backstage" + // Add additional details to the Backstage Spec helping in making Backstage RuntimeObjects Model // Validates Backstage Spec and fails fast if something not correct func (r *BackstageReconciler) preprocessSpec(ctx context.Context, backstage bs.Backstage) (model.ExternalConfig, error) { @@ -34,28 +44,23 @@ func (r *BackstageReconciler) preprocessSpec(ctx context.Context, backstage bs.B bsSpec := backstage.Spec ns := backstage.Namespace - result := model.ExternalConfig{ - RawConfig: map[string]string{}, - AppConfigs: map[string]corev1.ConfigMap{}, - ExtraFileConfigMaps: map[string]corev1.ConfigMap{}, - ExtraEnvConfigMaps: map[string]corev1.ConfigMap{}, - } + result := model.NewExternalConfig() // 
Process RawConfig if bsSpec.RawRuntimeConfig != nil { if bsSpec.RawRuntimeConfig.BackstageConfigName != "" { - cm := corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{Name: bsSpec.RawRuntimeConfig.BackstageConfigName, Namespace: ns}, &cm); err != nil { - return result, fmt.Errorf("failed to load rawConfig %s: %w", bsSpec.RawRuntimeConfig.BackstageConfigName, err) + cm := &corev1.ConfigMap{} + if err := r.addExtConfig(&result, ctx, cm, backstage.Name, bsSpec.RawRuntimeConfig.BackstageConfigName, ns); err != nil { + return result, err } for key, value := range cm.Data { result.RawConfig[key] = value } } if bsSpec.RawRuntimeConfig.LocalDbConfigName != "" { - cm := corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{Name: bsSpec.RawRuntimeConfig.LocalDbConfigName, Namespace: ns}, &cm); err != nil { - return result, fmt.Errorf("failed to load rawConfig %s: %w", bsSpec.RawRuntimeConfig.LocalDbConfigName, err) + cm := &corev1.ConfigMap{} + if err := r.addExtConfig(&result, ctx, cm, backstage.Name, bsSpec.RawRuntimeConfig.LocalDbConfigName, ns); err != nil { + return result, err } for key, value := range cm.Data { result.RawConfig[key] = value @@ -69,47 +74,109 @@ func (r *BackstageReconciler) preprocessSpec(ctx context.Context, backstage bs.B // Process AppConfigs if bsSpec.Application.AppConfig != nil { - //mountPath := bsSpec.Application.AppConfig.MountPath for _, ac := range bsSpec.Application.AppConfig.ConfigMaps { - cm := corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{Name: ac.Name, Namespace: ns}, &cm); err != nil { - return result, fmt.Errorf("failed to get configMap %s: %w", ac.Name, err) + cm := &corev1.ConfigMap{} + if err := r.addExtConfig(&result, ctx, cm, backstage.Name, ac.Name, ns); err != nil { + return result, err } - result.AppConfigs[cm.Name] = cm + result.AppConfigs[ac.Name] = *cm } } // Process ConfigMapFiles if bsSpec.Application.ExtraFiles != nil && bsSpec.Application.ExtraFiles.ConfigMaps != nil { for _, ef 
:= range bsSpec.Application.ExtraFiles.ConfigMaps { - cm := corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{Name: ef.Name, Namespace: ns}, &cm); err != nil { - return result, fmt.Errorf("failed to get ConfigMap %s: %w", ef.Name, err) + cm := &corev1.ConfigMap{} + if err := r.addExtConfig(&result, ctx, cm, backstage.Name, ef.Name, ns); err != nil { + return result, err + } + result.ExtraFileConfigMaps[cm.Name] = *cm + } + } + + // Process SecretFiles + if bsSpec.Application.ExtraFiles != nil && bsSpec.Application.ExtraFiles.Secrets != nil { + for _, ef := range bsSpec.Application.ExtraFiles.Secrets { + secret := &corev1.Secret{} + if err := r.addExtConfig(&result, ctx, secret, backstage.Name, ef.Name, ns); err != nil { + return result, err } - result.ExtraFileConfigMaps[cm.Name] = cm + result.ExtraFileSecrets[secret.Name] = *secret } } // Process ConfigMapEnvs if bsSpec.Application.ExtraEnvs != nil && bsSpec.Application.ExtraEnvs.ConfigMaps != nil { for _, ee := range bsSpec.Application.ExtraEnvs.ConfigMaps { - cm := corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{Name: ee.Name, Namespace: ns}, &cm); err != nil { - return result, fmt.Errorf("failed to get configMap %s: %w", ee.Name, err) + cm := &corev1.ConfigMap{} + if err := r.addExtConfig(&result, ctx, cm, backstage.Name, ee.Name, ns); err != nil { + return result, err + } + result.ExtraEnvConfigMaps[cm.Name] = *cm + } + } + + // Process SecretEnvs + if bsSpec.Application.ExtraEnvs != nil && bsSpec.Application.ExtraEnvs.Secrets != nil { + for _, ee := range bsSpec.Application.ExtraEnvs.Secrets { + secret := &corev1.Secret{} + if err := r.addExtConfig(&result, ctx, secret, backstage.Name, ee.Name, ns); err != nil { + return result, err } - result.ExtraEnvConfigMaps[cm.Name] = cm + result.ExtraEnvSecrets[secret.Name] = *secret } } // Process DynamicPlugins if bsSpec.Application.DynamicPluginsConfigMapName != "" { - cm := corev1.ConfigMap{} - if err := r.Get(ctx, 
types.NamespacedName{Name: bsSpec.Application.DynamicPluginsConfigMapName, - Namespace: ns}, &cm); err != nil { - return result, fmt.Errorf("failed to get ConfigMap %v: %w", cm, err) + cm := &corev1.ConfigMap{} + if err := r.addExtConfig(&result, ctx, cm, backstage.Name, bsSpec.Application.DynamicPluginsConfigMapName, ns); err != nil { + return result, err } - result.DynamicPlugins = cm + result.DynamicPlugins = *cm } return result, nil } + +func (r *BackstageReconciler) addExtConfig(config *model.ExternalConfig, ctx context.Context, obj client.Object, backstageName, objectName, ns string) error { + + lg := log.FromContext(ctx) + + if err := r.Get(ctx, types.NamespacedName{Name: objectName, Namespace: ns}, obj); err != nil { + if _, ok := obj.(*corev1.Secret); ok && errors.IsForbidden(err) { + return fmt.Errorf("warning: Secrets GET is forbidden, updating Secrets may not cause Pod recreating") + } + return fmt.Errorf("failed to get external config from %s: %s", objectName, err) + } + + if err := config.AddToSyncedConfig(obj); err != nil { + return fmt.Errorf("failed to add to synced %s: %s", obj.GetName(), err) + } + + if obj.GetLabels() == nil { + obj.SetLabels(map[string]string{}) + } + if obj.GetAnnotations() == nil { + obj.SetAnnotations(map[string]string{}) + } + + autoSync := true + autoSyncStr, ok := os.LookupEnv(AutoSyncEnvVar) + if ok { + autoSync, _ = strconv.ParseBool(autoSyncStr) + } + + if obj.GetLabels()[model.ExtConfigSyncLabel] == "" || obj.GetAnnotations()[model.BackstageNameAnnotation] == "" || + obj.GetLabels()[model.ExtConfigSyncLabel] != strconv.FormatBool(autoSync) { + + obj.GetLabels()[model.ExtConfigSyncLabel] = strconv.FormatBool(autoSync) + obj.GetAnnotations()[model.BackstageNameAnnotation] = backstageName + if err := r.Update(ctx, obj); err != nil { + return fmt.Errorf("failed to update external config %s: %s", objectName, err) + } + lg.V(1).Info(fmt.Sprintf("update external config %s with label %s=%s and annotation %s=%s", 
obj.GetName(), model.ExtConfigSyncLabel, strconv.FormatBool(autoSync), model.BackstageNameAnnotation, backstageName)) + } + + return nil +} diff --git a/docker/Dockerfile b/docker/Dockerfile index bfa0469c..7f24fa25 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ # limitations under the License. #@follow_tag(registry.redhat.io/rhel9/go-toolset:latest) -FROM registry.access.redhat.com/ubi9/go-toolset:1.20.12-3.1712567214 AS builder +FROM registry.access.redhat.com/ubi9/go-toolset:1.21.11-2 AS builder # hadolint ignore=DL3002 USER 0 ENV GOPATH=/go/ @@ -54,7 +54,7 @@ RUN export ARCH="$(uname -m)" && if [[ ${ARCH} == "x86_64" ]]; then export ARCH= # Install openssl for FIPS support #@follow_tag(registry.redhat.io/ubi9/ubi-minimal:latest) -FROM registry.access.redhat.com/ubi9-minimal:9.3-1612 AS runtime +FROM registry.access.redhat.com/ubi9-minimal:9.4-1134 AS runtime RUN microdnf update --setopt=install_weak_deps=0 -y && microdnf install -y openssl; microdnf clean -y all # Upstream sources diff --git a/docs/admin.md b/docs/admin.md index 9bf9bfeb..e806a3c1 100644 --- a/docs/admin.md +++ b/docs/admin.md @@ -29,17 +29,16 @@ Mapping of configMap keys (yaml files) to runtime objects (NOTE: for the time (D | service.yaml | corev1.Service | Yes | all | Backstage Service | | db-statefulset.yaml | appsv1.Statefulset | For DB enabled | all | PostgreSQL StatefulSet | | db-service.yaml | corev1.Service | For DB enabled | all | PostgreSQL Service | -| db-service-hl.yaml | corev1.Service | For DB enabled | all | PostgreSQL Service | | db-secret.yaml | corev1.Secret | For DB enabled | all | Secret to connect Backstage to PSQL | | route.yaml | openshift.Route | No (for OCP) | all | Route exposing Backstage service | -| app-config.yaml | corev1.ConfigMap | No | 0.0.2 | Backstage app-config.yaml | -| configmap-files.yaml | corev1.ConfigMap | No | 0.0.2 | Backstage config file inclusions from configMap | -| configmap-envs.yaml | corev1.ConfigMap | No | 0.0.2 | 
Backstage env variables from configMap | -| secret-files.yaml | corev1.Secret | No | 0.0.2 | Backstage config file inclusions from Secret | -| secret-envs.yaml | corev1.Secret | No | 0.0.2 | Backstage env variables from Secret | -| dynamic-plugins.yaml | corev1.ConfigMap | No | 0.0.2 | dynamic-plugins config * | -| dynamic-plugins-configmap.yaml | corev1.ConfigMap | No | 0.0.1 | dynamic-plugins config * | -| backend-auth-configmap.yaml | corev1.ConfigMap | No | 0.0.1 | backend auth config | +| app-config.yaml | corev1.ConfigMap | No | 0.2.0 | Backstage app-config.yaml | +| configmap-files.yaml | corev1.ConfigMap | No | 0.2.0 | Backstage config file inclusions from configMap | +| configmap-envs.yaml | corev1.ConfigMap | No | 0.2.0 | Backstage env variables from configMap | +| secret-files.yaml | corev1.Secret | No | 0.2.0 | Backstage config file inclusions from Secret | +| secret-envs.yaml | corev1.Secret | No | 0.2.0 | Backstage env variables from Secret | +| dynamic-plugins.yaml | corev1.ConfigMap | No | 0.2.0 | dynamic-plugins config * | +| dynamic-plugins-configmap.yaml | corev1.ConfigMap | No | 0.1.0 | dynamic-plugins config * | +| backend-auth-configmap.yaml | corev1.ConfigMap | No | 0.1.0 | backend auth config | NOTES: diff --git a/docs/db_migration.md b/docs/db_migration.md new file mode 100644 index 00000000..4e9d5375 --- /dev/null +++ b/docs/db_migration.md @@ -0,0 +1,135 @@ +## Move Backstage databases to external DB server + +By default, Backstage hosts data for each plugin, so there are usually several databases, depending on the number of plugins, those databases usually prefixed with "backstage_plugin_". 
+ +```` +postgres=> \l +List of databases +Name | Owner | Encoding | Locale Provider | Collate | Ctype | +----------------------------+----------+----------+-----------------+-------------+-------------+ +backstage_plugin_app | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +backstage_plugin_auth | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +backstage_plugin_catalog | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +backstage_plugin_permission | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +backstage_plugin_scaffolder | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +backstage_plugin_search | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +postgres | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | +```` + +To move the data from working Backstage instance hosted on a local PostgreSQL server to a production-ready PostgreSQL service (such as AWS RDB or Azure Database), you can use directly PostgreSQL utilities such as [pg_dump](https://www.postgresql.org/docs/current/app-pgdump.html) with [psql](https://www.postgresql.org/docs/current/app-psql.html) or [pgAdmin](https://www.pgadmin.org/) and move the data from each database one-by-one. +To simplify this process, we have a [**db_copy.sh**](../hack/db_copy.sh) script. + +### Prerequisites + +- [**pg_dump**](https://www.postgresql.org/docs/current/backup-dump.html) and [**psql**](https://www.postgresql.org/docs/current/app-psql.html) client utilities installed on your local machine. +- For data export the **PGSQL user** sufficient privileges to make a full dump of source (local) databases +- For data import the **PGSQL user** sufficient admin privileges to create (external) databases and populate it with database dumps + + +### Make [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) of the source (local) database pod. 
+ +```` +kubectl port-forward -n : +```` + +Where: + +- **pgsql-pod-name** a name of PostgreSQL pod with format like backstage-psql--<_index> +- **forward-to-port** port of your choice to forward PGSQL to +- **forward-from-port** PGSQL port, usually 5432 + +For example: + +```` +kubectl port-forward -n backstage backstage-psql-backstage1-0 15432:5432 +Forwarding from 127.0.0.1:15432 -> 5432 +Forwarding from [::1]:15432 -> 5432 +```` +**NOTE:** it has to be run on a dedicated terminal and interrupted as soon as data copy is completed. + +### Configure PGSQL connection + +Make a copy of **db_copy.sh** script and modify it according to your configuration: + +* **to_host**=destination host name (e g #.#.#.rds.amazonaws.com) +* **to_port**=destination port (usually 5432) +* **to_user**=destination server username (e g postgres) +* **from_host**=usually 127.0.0.1 +* **from_port**=< forward-to-port > +* **from_user**=source server username (e g postgres) +* **allDB**=name of databases for import in double quotes separated by spaces, e g ("backstage_plugin_app" "backstage_plugin_auth" "backstage_plugin_catalog" "backstage_plugin_permission" "backstage_plugin_scaffolder" "backstage_plugin_search") + +### Create destination databases and copy data + +```` +/bin/bash TO_PSW= /path/to/db_copy.sh +```` + +It will produce some output about **pg_dump** and **psql** progressing. +When successfully finished you can stop port forwarding. + +**NOTE:** In case if your databases are quite big already, you may consider using compression tools as [documented](https://www.postgresql.org/docs/current/backup-dump.html#BACKUP-DUMP-LARGE) + +### Reconfigure Backstage Custom Resource + +Reconfigure Backstage according to [External DB configuration](external-db.md) i.e. +* Create external DB connection Secret +* Create a Secret with certificate +* Configure CR disabling local DB and adding those 2 Secrets as extraEnv and extraFile accordingly. 
+ +At the end your Backstage.spec CR should contain the following: +```` +spec: + database: + enableLocalDb: false + application: + ... + extraFiles: + secrets: + - name: + key: postgres-crt.pem # key name as in Secret + extraEnvs: + secrets: + - name: + ... +```` +Apply these changes. + +### Clean local Persistence Volume + +When Backstage is reconfigured with **spec.database.enableLocalDb: false** it deletes corresponding StatefulSet and Pod(s) but Persistence Volume Claim and associated Persistence Volume retained. +You need to clean it manually with + +```` + kubectl -n backstage delete pvc +```` + +Where **local-psql-pvc-name** has format **data- (see above) + + +### Troubleshooting + +Backstage container may fail with Crash Loop Backoff error and "Can't take lock to run migrations: Migration table is already locked" log error: + +```` +2024-06-02T20:37:44.941Z catalog info Performing database migration │ +Can't take lock to run migrations: Migration table is already locked │ +If you are sure migrations are not running you can release the lock manually by running 'knex migrate:unlock' │ +/opt/app-root/src/node_modules/@backstage/backend-app-api/dist/index.cjs.js:1793 │ + throw new errors.ForwardedError( │ + ^ │ + MigrationLocked: Plugin 'auth' startup failed; caused by MigrationLocked: Migration table is already locked │ +```` + +A way to make it work without knex utility is to delete the data from the **knex_migrations_lock** table for each problematic Backstage plugin's database (in the example above it is **'auth'** plugin, so corresponding database is **backstage_plugin_auth**): + +```` +psql -h -U -d -c "delete from knex_migrations_lock;" +```` + +**NOTE:** in some DB the table is called differently like: **backstage_backend_tasks__knex_migrations_lock** in the **backstage_plugin_search** database + + + + + diff --git a/docs/developer.md b/docs/developer.md index 2812d72d..7f4fe833 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -1,21 +1,21 @@ 
-# DEVELOPER GUIDE --- WIP +# Developer Guide ## Contributing // TODO(user): Add detailed information on how you would like others to contribute to this project + ### How it works This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/). It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) which provides a reconcile function responsible for synchronizing resources until the desired state is reached on the cluster. - ## Local development ### Prerequisites * **kubectl**. See [Instaling kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). -* **minikube**. See [Instaling minkube](https://kubernetes.io/docs/tasks/tools/#minikube). +* Available local or remote Kubernetes cluster with cluster admin privileges. For instance **minikube**. See [Instaling minkube](https://kubernetes.io/docs/tasks/tools/#minikube). * A copy of the Backstage Operator sources: ```sh git clone https://github.com/janus-idp/operator @@ -23,93 +23,128 @@ git clone https://github.com/janus-idp/operator ### Local Tests -To run both unit tests (since 0.0.2) and Kubernetes integration tests ([envtest](https://book.kubebuilder.io/reference/envtest.html)): +To run: +* all the unit tests +* part of [Integration Tests](../integration_tests/README.md) which does not require a real cluster. ```sh make test ``` -### Test on the local cluster +It only takes a few seconds to run, but covers quite a lot of functionality. For early regression detection, it is recommended to run it as often as possible during development. -You’ll need a Kubernetes cluster to run against. -You can use [minikube](https://kubernetes.io/docs/tasks/tools/#minikube) or [kind](https://kubernetes.io/docs/tasks/tools/#kind) to get a local cluster for testing, or run against a remote cluster. +### Test on the cluster -**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. 
whatever cluster `kubectl cluster-info` shows). +For testing, you will need a Kubernetes cluster, either remote (with sufficient admin rights) or local, such as [minikube](https://kubernetes.io/docs/tasks/tools/#minikube) or [kind](https://kubernetes.io/docs/tasks/tools/#kind) - Build and push your image to the location specified by `IMG`: ```sh make image-build image-push IMG=/backstage-operator:tag ``` -- Install the CRDs into the local cluster (minikube is installed and running): +- Install the Custom Resource Definitions into the local cluster (minikube is installed and running): ```sh make install ``` +**IMPORTANT:** If you are editing the CRDs, make sure you reinstall it before deploying. -- You can run your controller standalone (this will run in the foreground, so switch to a new terminal if you want to leave it running) -This way you can see controllers log just in your terminal window which is quite convenient for debugging: +- To delete the CRDs from the cluster: ```sh -make run +make uninstall ``` -- Or deploy the controller to the cluster with the image specified by `IMG`: +### Run the controller standalone + +You can run your controller standalone (this will run in the foreground, so switch to a new terminal if you want to leave it running) +This way you can see controllers log just in your terminal window which is quite convenient for debugging. ```sh -make deploy [IMG=/backstage-operator:tag] +make [install] run ``` -- To generate deployment manifest, use: +You can use it for manual and automated ([such as](../integration_tests/README.md) `USE_EXISTING_CLUSTER=true make integration-test`) tests efficiently, but, note, RBAC is not working with this kind of deployment. 
+ +### Deploy operator to the real cluster + +For development, most probably, you will need to specify the image you build and push: ```sh -make deployment-manifest [IMG=/backstage-operator:tag] +make deploy [IMG=/backstage-operator[:tag]] ``` -it will create the file rhdh-operator-${VERSION}.yaml on the project root and you will be able to share it to make it possible to deploy operator with: + +To undeploy the controller from the cluster: ```sh -kubectl apply -f +make undeploy ``` -### Uninstall CRDs -To delete the CRDs from the cluster: +- To generate deployment manifest, use: ```sh -make uninstall +make deployment-manifest [IMG=/backstage-operator:tag] ``` -### Undeploy controller -UnDeploy the controller from the cluster: +it will create the file rhdh-operator-${VERSION}.yaml on the project root and you will be able to share it to make it possible to deploy operator with: ```sh -make undeploy +kubectl apply -f ``` -### Build and Deploy with OLM: -1. To build operator, bundle and catalog images: + +### Deploy with Operator Lifecycle Manager (valid for v0.3.0+): + +#### OLM + +Make sure your cluster supports **OLM**. For instance [Openshift](https://www.redhat.com/en/technologies/cloud-computing/openshift) supports it out of the box. +If needed install it using: + ```sh -make release-build +make install-olm ``` -2. To push operator, bundle and catalog images to the registry: + +#### Build and push images + +There are a bunch of commands to build and push to the registry necessary images. 
+For development purpose, most probably, you will need to specify the image you build and push with IMAGE_TAG_BASE env variable: + +* `[IMAGE_TAG_BASE=/backstage-operator] make image-build` builds operator manager image (**backstage-operator**) +* `[IMAGE_TAG_BASE=/backstage-operator] make image-push` pushes operator manager image to **your-registry** +* `[IMAGE_TAG_BASE=/backstage-operator] make bundle-build` builds operator manager image (**backstage-operator-bundle**) +* `[IMAGE_TAG_BASE=/backstage-operator] make bundle-push` pushes bundle image to **your-registry** +* `[IMAGE_TAG_BASE=/backstage-operator] make catalog-build` builds catalog image (**backstage-operator-catalog**) +* `[IMAGE_TAG_BASE=/backstage-operator] make catalog-push` pushes catalog image to **your-registry** + +You can do it all together using: ```sh -make release-push +[IMAGE_TAG_BASE=/backstage-operator] make release-build release-push ``` -3. To deploy or update catalog source: + +#### Deploy or update the Catalog Source + ```sh -make catalog-update +[OLM_NAMESPACE=] [IMAGE_TAG_BASE=/backstage-operator] make catalog-update ``` -4. To deloy the operator with OLM +You can point the namespace where OLM installed. By default, in a vanilla Kubernetes, OLM os deployed on 'olm' namespace. In Openshift you have to explicitly point it to **openshift-marketplace** namespace. + +#### Deploy the Operator with OLM +Default namespace to deploy the Operator is called **backstage-system** , this name fits one defined in [kustomization.yaml](../config/default/kustomization.yaml). So, if you consider changing it you have to change it in this file and define **OPERATOR_NAMESPACE** environment variable. +Following command creates OperatorGroup and Subscription on Operator namespace ```sh -make deploy-olm +[OPERATOR_NAMESPACE=] make deploy-olm ``` -5. To undeploy the operator with OLM +To undeploy the Operator ```sh make undeploy-olm ``` -6. 
To deploy the operator to Openshift with OLM +#### Convenient commands to build and deploy operator with OLM + +**NOTE:** OLM has to be installed as a prerequisite + +* To build and deploy the operator to vanilla Kubernetes with OLM ```sh -make deploy-openshift [IMAGE_TAG_BASE=/backstage-operator] +[IMAGE_TAG_BASE=/backstage-operator] make deploy-k8s-olm ``` -### Modifying the API definitions -If you are editing the API definitions, make sure you: -- run `make install` before deploying the operator with `make deploy` -- regenerate the manifests and bundle if you plan to deploy the operator with OLM using: +* To build and deploy the operator to Openshift with OLM ```sh -make manifests bundle +[IMAGE_TAG_BASE=/backstage-operator] make deploy-openshift ``` + + **NOTE:** Run `make help` for more information on all potential `make` targets More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) diff --git a/docs/external-db.md b/docs/external-db.md new file mode 100644 index 00000000..1bd2e2d9 --- /dev/null +++ b/docs/external-db.md @@ -0,0 +1,161 @@ +# External DB integration + +Backstage hosts the data in a [PostgreSQL database](https://backstage.io/docs/getting-started/config/database/). +By default, the Operator creates and manages a local instance of PostgreSQL in the same namespace as the Backstage deployment but it also allows to switch this off and configure an external database server instead. +Usually, external connection requires more security, so, this instruction includes steps to configure SSL/TLS. 
+ +### Configure your external PostgreSQL instance +As a prerequisite, you have to know: +- **db-host** - your PostgreSQL instance DNS or IP address +- **db-port** - your PostgreSQL instance port number (usually 5432) +- **username** - to connect to your PostgreSQL instance +- **password** - to connect to your PostgreSQL instance + +**NOTE:** By default, Backstage uses databases for each plugin and automatically creates them if none are found, so in addition to PSQL Database level privileges, the user may need Create Database privilege. + +In addition, to get your database connection secured with SSL/TLS, you also need certificates in the form of PEM file. + +You can find configuration guidelines for: +- [AWS RDS PostgreSQL](#aws-rds-postgresql) +- [Azure Database PostgreSQL](#azure-db-postgresql) + +### Create secret with PostgreSQL connection properties: +````yaml +cat < create -f - +apiVersion: v1 +kind: Secret +metadata: + name: +type: Opaque +stringData: + POSTGRES_PASSWORD: + POSTGRES_PORT: + POSTGRES_USER: + POSTGRES_HOST: + PGSSLMODE: require # for TLS connection + NODE_EXTRA_CA_CERTS: # for TLS connection, e.g. /opt/app-root/src/postgres-crt.pem +EOF +```` + +### Create secret with certificate(s): +(omit this step if you do not need TLS connection, maybe for testing purpose) + +````yaml +cat < create -f - +apiVersion: v1 +kind: Secret +metadata: + name: +type: Opaque +stringData: + postgres-crt.pem: |- + -----BEGIN CERTIFICATE----- + MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl + ... 
+```` + +### Create Backstage Custom Resource: + +- disable creating local PostgreSQL instance with **spec.database.enableLocalDb: false** +- add **** to **spec.application.extraFiles.secrets**, so, as for example below **postgres-crt.pem** file will be mounted to Backstage container at **spec.application.extraFiles.mountPath** directory: +- add **** to **spec.application.extraEnvs.secrets**, so all the data's entries will be injected to Backstage container as environment variables. + +**NOTE:** environment variables listed in **** file work with default Operator configuration. If it is changed on default or raw configuration, you have to re-configure it accordingly. + +````yaml +cat < create -f - +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: +spec: + database: + enableLocalDb: false + application: + extraFiles: + mountPath: # e g /opt/app-root/src + secrets: + - name: + key: postgres-crt.pem # key name as in Secret + extraEnvs: + secrets: + - name: +```` + +## External PostgreSQL types + +### AWS RDS PostgreSQL +(Tested on PGSQL 15) + +#### Prerequisites +- An AWS account with an active subscription and a PostgreSQL instance on [Amazon RDS for PostgreSQL](https://aws.amazon.com/rds/postgresql/) +- (Optionally) Pgsql client installed to check your database connections + +#### Preparation +- (Optionally) Check your Database connection: + +```` +psql -h -p -U +```` + +- Enter the and output should be something like: + +```` +SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off) +Type "help" for help. +postgres=> +```` + +(type ‘\q’ to quit from psql CLI) + +**TIP:** The most probable reason for an unsuccessful connection is not properly configured Security Group inbound rule. Make sure you have one enabled for external connection. 
+ +- Download a certificate bundle [Certificate bundles for all AWS Regions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificatesAllRegions) or [Certificate bundles for specific AWS Region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.RegionCertificates). + +**NOTE:** AWS RDS **enforces** connecting your client applications using Transport Layer Security (TLS) starting from PGSQL v15. You can disable it by adding a Parameter Group and setting **rds.force_ssl=0** + +- Use this PEM file (**postgres-crt.pem**) as the data for the **** Secret above. + +### Azure DB PostgreSQL +(Tested on PGSQL 15) + +#### Prerequisites +- An [Azure](https://azure.microsoft.com/) account with an active subscription and [Azure Database for PostgreSQL - Flexible Server instance](https://learn.microsoft.com/en-gb/azure/postgresql/flexible-server/overview). +- (Optionally) Pgsql client installed to check your database connections + +#### Preparation + +- (Optionally) Check your Database connection: + +```` +psql -h -p -U +```` + +Enter the password and the output should be something like: +```` +SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off) +Type "help" for help. +postgres=> +```` +(type ‘\q’ to quit from psql CLI) + +**TIP**: The most probable reason for an unsuccessful connection is Firewall rules that are not properly configured for public access. 
+ + +- Download the Microsoft ECC Root Certificate Authority 2017 and DigiCert Global Root CA certificates from the URIs provided [here](https://learn.microsoft.com/en-gb/azure/postgresql/flexible-server/concepts-networking-ssl-tls#downloading-root-ca-certificates-and-updating-application-clients-in-certificate-pinning-scenarios). + +**NOTE:** Azure Database for PostgreSQL flexible server **enforces** connecting your client applications using Transport Layer Security (TLS) + +- Convert the .crt files you downloaded to .pem format as [suggested](https://learn.microsoft.com/en-gb/azure/postgresql/flexible-server/concepts-networking-ssl-tls#downloading-root-ca-certificates-and-updating-application-clients-in-certificate-pinning-scenarios) using + +```` +openssl x509 -in DigiCertGlobalRootCA.crt -out DigiCertGlobalRootCA.crt.pem -outform PEM + +openssl x509 -in "Microsoft ECC Root Certificate Authority 2017.crt" -out "Microsoft ECC Root Certificate Authority 2017.crt.pem" -outform PEM +```` + +- Combine them according to this [suggestion](https://learn.microsoft.com/en-gb/azure/postgresql/flexible-server/how-to-update-client-certificates-java#updating-root-ca-certificates-for-other-clients-for-certificate-pinning-scenarios), like: + +```` +cat DigiCertGlobalRootCA.crt.pem "Microsoft ECC Root Certificate Authority 2017.crt.pem" > postgres-crt.pem +```` \ No newline at end of file diff --git a/examples/bs-existing-secret.yaml b/examples/bs-existing-secret.yaml index 51846725..d4069195 100644 --- a/examples/bs-existing-secret.yaml +++ b/examples/bs-existing-secret.yaml @@ -1,4 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: bs-existing-secret @@ -13,8 +13,8 @@ metadata: name: existing-postgres-secret type: Opaque stringData: - POSTGRES_PASSWORD: admin123 + POSTGRES_PASSWORD: "admin123" POSTGRES_PORT: "5432" - POSTGRES_USER: postgres - POSTGRESQL_ADMIN_PASSWORD: admin123 - POSTGRES_HOST: 
bs-existing-secret-backstage-db + POSTGRES_USER: "postgres" + POSTGRESQL_ADMIN_PASSWORD: "admin123" + POSTGRES_HOST: "backstage-psql-bs-existing-secret" diff --git a/examples/bs-route-disabled.yaml b/examples/bs-route-disabled.yaml index dd35530c..25ecbee9 100644 --- a/examples/bs-route-disabled.yaml +++ b/examples/bs-route-disabled.yaml @@ -1,4 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: bs-route-disabled diff --git a/examples/bs-route.yaml b/examples/bs-route.yaml index 8ec5838f..e9f03790 100644 --- a/examples/bs-route.yaml +++ b/examples/bs-route.yaml @@ -1,4 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: bs-route diff --git a/examples/bs1.yaml b/examples/bs1.yaml index a1b7a90f..9d5747d4 100644 --- a/examples/bs1.yaml +++ b/examples/bs1.yaml @@ -1,7 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: bs1 - - - diff --git a/examples/postgres-secret.yaml b/examples/postgres-secret.yaml index 9ed82362..1b0123e6 100644 --- a/examples/postgres-secret.yaml +++ b/examples/postgres-secret.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: Secret metadata: name: postgres-secrets - namespace: backstage type: Opaque stringData: POSTGRES_PASSWORD: admin12345 diff --git a/examples/rhdh-cr-with-app-configs.yaml b/examples/rhdh-cr-with-app-configs.yaml index a9e67f1d..a76de415 100644 --- a/examples/rhdh-cr-with-app-configs.yaml +++ b/examples/rhdh-cr-with-app-configs.yaml @@ -1,4 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: bs-app-config @@ -46,9 +46,17 @@ data: "app-config.backend-auth.yaml": | backend: auth: - keys: - - secret: "${BACKEND_SECRET}" - + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" + auth: + environment: development + providers: + guest: 
+ # using the guest user to query the '/api/dynamic-plugins-info/loaded-plugins' endpoint. + dangerouslyAllowOutsideDevelopment: true --- apiVersion: v1 kind: Secret @@ -126,7 +134,7 @@ data: initialDelay: { seconds: 15} - package: ./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic pluginConfig: - # Reference documentation http://backstage.io/docs/features/techdocs/configuration + # Reference documentation https://backstage.io/docs/features/techdocs/configuration # Note: After experimenting with basic setup, use CI/CD to generate docs # and an external cloud storage when deploying TechDocs for production use-case. # https://backstage.io/docs/features/techdocs/how-to-guides#how-to-migrate-from-techdocs-basic-to-recommended-deployment-approach @@ -141,7 +149,7 @@ data: pluginConfig: catalog: providers: - gitlab: {} + gitlab: {} --- apiVersion: v1 diff --git a/examples/rhdh-cr.yaml b/examples/rhdh-cr.yaml index 66502d4a..7f915474 100644 --- a/examples/rhdh-cr.yaml +++ b/examples/rhdh-cr.yaml @@ -1,12 +1,10 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: my-rhdh spec: application: - image: quay.io/rhdh/rhdh-hub-rhel9:1.0-200 - imagePullSecrets: - - rhdh-pull-secret + image: quay.io/rhdh/rhdh-hub-rhel9:latest appConfig: configMaps: - name: app-config-rhdh @@ -24,8 +22,11 @@ data: "app-config-rhdh.yaml": | backend: auth: - keys: - - secret: "${BACKEND_SECRET}" + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" auth: # see https://backstage.io/docs/auth/ to learn about auth providers environment: development diff --git a/examples/showcase-config.yaml b/examples/showcase-config.yaml index 408dc787..e9d14c5a 100644 --- a/examples/showcase-config.yaml +++ b/examples/showcase-config.yaml @@ -3,7 +3,7 @@ kind: ConfigMap metadata: name: showcase-config data: - deploy: |- + deployment.yaml: |- apiVersion: apps/v1 kind: Deployment metadata: diff 
--git a/examples/showcase-cr.yaml b/examples/showcase-cr.yaml index 4d2fa6bd..c8d86446 100644 --- a/examples/showcase-cr.yaml +++ b/examples/showcase-cr.yaml @@ -1,4 +1,4 @@ -apiVersion: rhdh.redhat.com/v1alpha1 +apiVersion: rhdh.redhat.com/v1alpha2 kind: Backstage metadata: name: bs-showcase diff --git a/go.mod b/go.mod index 307621e5..cc05ceb0 100644 --- a/go.mod +++ b/go.mod @@ -1,31 +1,35 @@ module redhat-developer/red-hat-developer-hub-operator -go 1.20 +go 1.21 require ( github.com/onsi/ginkgo/v2 v2.17.1 github.com/onsi/gomega v1.33.0 - github.com/openshift/api v0.0.0-20240418150331-2449d07abb86 + github.com/openshift/api v0.0.0-20240419172957-f39cf2ef93fd github.com/stretchr/testify v1.9.0 k8s.io/api v0.30.0 + k8s.io/apiextensions-apiserver v0.29.2 k8s.io/apimachinery v0.30.0 k8s.io/client-go v0.30.0 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 sigs.k8s.io/controller-runtime v0.17.3 + sigs.k8s.io/kustomize/kyaml v0.17.1 + sigs.k8s.io/yaml v1.4.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -34,17 +38,20 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 
// indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect @@ -66,11 +73,9 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.29.2 // indirect k8s.io/component-base v0.29.2 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index d8aca7ae..52efd064 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -7,16 +9,19 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod 
h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -26,8 +31,9 @@ github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -47,21 +53,23 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= 
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -70,30 +78,30 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= 
github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= -github.com/openshift/api v0.0.0-20240328182048-8bef56a2e295 h1:Fv47GtZvL6XvM/eHdRyb9NJezy/wY/0YtisbZyir58E= -github.com/openshift/api v0.0.0-20240328182048-8bef56a2e295/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/api v0.0.0-20240412130237-e2b0b690b638 h1://6BunjFcTaoaWD9IXRC2BynmIW9ag7k2ekGrUYJbzY= -github.com/openshift/api v0.0.0-20240412130237-e2b0b690b638/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/api v0.0.0-20240415140253-c0feb35ae9fb h1:VXw3qKECkLeZFJaNw5XPnAgwn8nCeLe3OeXgGHSzRsU= -github.com/openshift/api v0.0.0-20240415140253-c0feb35ae9fb/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/api v0.0.0-20240418150331-2449d07abb86 h1:m/w2kof5rKYG0O+Xx19mmvDenen7LQWgRdV46/iVud0= -github.com/openshift/api v0.0.0-20240418150331-2449d07abb86/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/api v0.0.0-20240419172957-f39cf2ef93fd h1:DztdAsKaNJjfL12LyBCxL2ELPXn4NdWE/IxLCUpL7AY= +github.com/openshift/api v0.0.0-20240419172957-f39cf2ef93fd/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang 
v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= @@ -102,7 +110,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -113,13 +122,14 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -136,8 +146,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= @@ -149,12 +157,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys 
v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -192,24 +196,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/client-go v0.29.4 
h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= @@ -218,12 +212,12 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/A k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= -sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/hack/db_copy.sh b/hack/db_copy.sh 
new file mode 100644 index 00000000..a6757fef --- /dev/null +++ b/hack/db_copy.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +to_host= +to_port=5432 +to_user=postgres + +from_host=127.0.0.1 +from_port=15432 +from_user=postgres + +allDB=("backstage_plugin_app" "backstage_plugin_auth" "backstage_plugin_catalog" "backstage_plugin_permission" "backstage_plugin_scaffolder" "backstage_plugin_search") + +for db in ${!allDB[@]}; +do + db=${allDB[$db]} + echo Copying database: $db + PGPASSWORD=$TO_PSW psql -h $to_host -p $to_port -U $to_user -c "create database $db;" + pg_dump -h $from_host -p $from_port -U $from_user -d $db | PGPASSWORD=$TO_PSW psql -h $to_host -p $to_port -U $to_user -d $db +done \ No newline at end of file diff --git a/integration_tests/README.md b/integration_tests/README.md index 7a2cff99..c5532ee1 100644 --- a/integration_tests/README.md +++ b/integration_tests/README.md @@ -1,5 +1,5 @@ -How to run Integration Tests +**How to run Integration Tests** - For development (controller will reconsile internally) - As a part of the whole testing suite just: @@ -13,7 +13,7 @@ How to run Integration Tests There are 2 environment variables to use with `make` command - `USE_EXISTING_CLUSTER=true` tells test suite to use externally running cluster (from the current .kube/config context) instead of envtest. - `USE_EXISTING_CONTROLLER=true` tells test suite to use operator controller manager either deployed to the cluster OR (prevails if both) running locally with `make [install] run` command. Works only with `USE_EXISTING_CLUSTER=true` - + So, in most of the cases - Make sure you test desirable version of Operator image, that's what `make image-build image-push` does. See Makefile what version `` has. 
@@ -21,4 +21,26 @@ How to run Integration Tests - `make install deploy` this will install CR and deploy Controller to `backstage-system` - `make integration-test USE_EXISTING_CLUSTER=true USE_EXISTING_CONTROLLER=true` - \ No newline at end of file +To run GINKGO with command line arguments (see https://onsi.github.io/ginkgo/#running-specs) +use 'ARGS' environment variable. +For example to run specific test(s) you can use something like: + +`make integration-test ARGS='--focus "my favorite test"'` + +**NOTE:** + +Some tests are Openshift specific only and skipped in a local envtest and bare k8s cluster. + +` +if !isOpenshiftCluster() { +Skip("Skipped for non-Openshift cluster") +} +` + +Some tests are workable only in real (EXISTING) cluster and skipped in envtest. + +` +if !*testEnv.UseExistingCluster { +Skip("Skipped for not real cluster") +} +` \ No newline at end of file diff --git a/integration_tests/config-refresh_test.go b/integration_tests/config-refresh_test.go new file mode 100644 index 00000000..92fed3e8 --- /dev/null +++ b/integration_tests/config-refresh_test.go @@ -0,0 +1,164 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package integration_tests + +import ( + "context" + "fmt" + "redhat-developer/red-hat-developer-hub-operator/pkg/utils" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/client" + + corev1 "k8s.io/api/core/v1" + + appsv1 "k8s.io/api/apps/v1" + + "redhat-developer/red-hat-developer-hub-operator/pkg/model" + + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" + + "k8s.io/apimachinery/pkg/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = When("create backstage with external configuration", func() { + + var ( + ctx context.Context + ns string + ) + + BeforeEach(func() { + ctx = context.Background() + ns = createNamespace(ctx) + }) + + AfterEach(func() { + deleteNamespace(ctx, ns) + }) + + It("refresh config", func() { + + if !*testEnv.UseExistingCluster { + Skip("Skipped for not real cluster") + } + + appConfig1 := "app-config1" + secretEnv1 := "secret-env1" + + backstageName := generateRandName("") + + generateConfigMap(ctx, k8sClient, appConfig1, ns, map[string]string{"key11": "app:", "key12": "app:"}, nil, nil) + generateSecret(ctx, k8sClient, secretEnv1, ns, map[string]string{"sec11": "val11"}, nil, nil) + + bs := bsv1.BackstageSpec{ + Application: &bsv1.Application{ + AppConfig: &bsv1.AppConfig{ + MountPath: "/my/mount/path", + ConfigMaps: []bsv1.ObjectKeyRef{ + {Name: appConfig1}, + }, + }, + ExtraEnvs: &bsv1.ExtraEnvs{ + Secrets: []bsv1.ObjectKeyRef{ + {Name: secretEnv1, Key: "sec11"}, + }, + }, + }, + } + + createAndReconcileBackstage(ctx, ns, bs, backstageName) + + Eventually(func(g Gomega) { + deploy := &appsv1.Deployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, deploy) + g.Expect(err).ShouldNot(HaveOccurred()) + + podList := &corev1.PodList{} + err = k8sClient.List(ctx, podList, client.InNamespace(ns), client.MatchingLabels{model.BackstageAppLabel: utils.BackstageAppLabelValue(backstageName)}) + 
g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(len(podList.Items)).To(Equal(1)) + podName := podList.Items[0].Name + out, _, err := executeRemoteCommand(ctx, ns, podName, "backstage-backend", "cat /my/mount/path/key11") + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(out).To(Equal("app:")) + + out, _, err = executeRemoteCommand(ctx, ns, podName, "backstage-backend", "echo $sec11") + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect("val11\r\n").To(Equal(out)) + + }, 10*time.Minute, 10*time.Second).Should(Succeed(), controllerMessage()) + + cm := &corev1.ConfigMap{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: appConfig1}, cm) + Expect(err).ShouldNot(HaveOccurred()) + + newData := "app:\n backend:" + cm.Data = map[string]string{"key11": newData} + err = k8sClient.Update(ctx, cm) + Expect(err).ShouldNot(HaveOccurred()) + + Eventually(func(g Gomega) { + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: appConfig1}, cm) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(cm.Data["key11"]).To(Equal(newData)) + + // Pod replaced so have to re-ask + podList := &corev1.PodList{} + err = k8sClient.List(ctx, podList, client.InNamespace(ns), client.MatchingLabels{model.BackstageAppLabel: utils.BackstageAppLabelValue(backstageName)}) + g.Expect(err).ShouldNot(HaveOccurred()) + + podName := podList.Items[0].Name + out, _, err := executeRemoteCommand(ctx, ns, podName, "backstage-backend", "cat /my/mount/path/key11") + g.Expect(err).ShouldNot(HaveOccurred()) + // TODO nicer method to compare file content with added '\r' + g.Expect(strings.ReplaceAll(out, "\r", "")).To(Equal(newData)) + + _, _, err = executeRemoteCommand(ctx, ns, podName, "backstage-backend", "cat /my/mount/path/key12") + g.Expect(err).Should(HaveOccurred()) + + }, 10*time.Minute, 10*time.Second).Should(Succeed(), controllerMessage()) + + sec := &corev1.Secret{} + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: secretEnv1}, sec) + 
Expect(err).ShouldNot(HaveOccurred()) + newEnv := "val22" + sec.StringData = map[string]string{"sec11": newEnv} + err = k8sClient.Update(ctx, sec) + Expect(err).ShouldNot(HaveOccurred()) + + Eventually(func(g Gomega) { + + // Pod replaced so have to re-ask + podList := &corev1.PodList{} + err = k8sClient.List(ctx, podList, client.InNamespace(ns), client.MatchingLabels{model.BackstageAppLabel: utils.BackstageAppLabelValue(backstageName)}) + g.Expect(err).ShouldNot(HaveOccurred()) + + podName := podList.Items[0].Name + + out, _, err := executeRemoteCommand(ctx, ns, podName, "backstage-backend", "echo $sec11") + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(fmt.Sprintf("%s%s", newEnv, "\r\n")).To(Equal(out)) + + }, 10*time.Minute, 10*time.Second).Should(Succeed(), controllerMessage()) + + }) + +}) diff --git a/integration_tests/cr-config_test.go b/integration_tests/cr-config_test.go index 622c0790..d92dbae3 100644 --- a/integration_tests/cr-config_test.go +++ b/integration_tests/cr-config_test.go @@ -18,12 +18,10 @@ import ( "context" "time" - "k8s.io/apimachinery/pkg/api/errors" + corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - corev1 "k8s.io/api/core/v1" - "redhat-developer/red-hat-developer-hub-operator/pkg/utils" appsv1 "k8s.io/api/apps/v1" @@ -32,7 +30,7 @@ import ( "redhat-developer/red-hat-developer-hub-operator/pkg/model" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "k8s.io/apimachinery/pkg/types" @@ -58,57 +56,62 @@ var _ = When("create backstage with CR configured", func() { It("creates Backstage with configuration ", func() { - appConfig1 := generateConfigMap(ctx, k8sClient, "app-config1", ns, map[string]string{"key11": "app:", "key12": "app:"}) - appConfig2 := generateConfigMap(ctx, k8sClient, "app-config2", ns, map[string]string{"key21": "app:", "key22": "app:"}) + appConfig1 := generateConfigMap(ctx, k8sClient, "app-config1", ns, 
map[string]string{"key11": "app:", "key12": "app:"}, nil, nil) + appConfig2 := generateConfigMap(ctx, k8sClient, "app-config2", ns, map[string]string{"key21": "app:", "key22": "app:"}, nil, nil) + appConfig3 := generateConfigMap(ctx, k8sClient, "app-config3.dot", ns, map[string]string{"key.31": "app31:"}, nil, nil) - cmFile1 := generateConfigMap(ctx, k8sClient, "cm-file1", ns, map[string]string{"cm11": "11", "cm12": "12"}) - cmFile2 := generateConfigMap(ctx, k8sClient, "cm-file2", ns, map[string]string{"cm21": "21", "cm22": "22"}) + cmFile1 := generateConfigMap(ctx, k8sClient, "cm-file1", ns, map[string]string{"cm11": "11", "cm12": "12"}, nil, nil) + cmFile2 := generateConfigMap(ctx, k8sClient, "cm-file2", ns, map[string]string{"cm21": "21", "cm22": "22"}, nil, nil) + cmFile3 := generateConfigMap(ctx, k8sClient, "cm-file3.dot", ns, map[string]string{"cm.31": "31"}, nil, nil) - secretFile1 := generateSecret(ctx, k8sClient, "secret-file1", ns, []string{"sec11", "sec12"}) - secretFile2 := generateSecret(ctx, k8sClient, "secret-file2", ns, []string{"sec21", "sec22"}) + secretFile1 := generateSecret(ctx, k8sClient, "secret-file1", ns, map[string]string{"sec11": "val11", "sec12": "val12"}, nil, nil) + secretFile2 := generateSecret(ctx, k8sClient, "secret-file2", ns, map[string]string{"sec21": "val21", "sec22": "val22"}, nil, nil) + secretFile3 := generateSecret(ctx, k8sClient, "secret-file3.dot", ns, map[string]string{"sec.31": "val31", "sec.32": "val22"}, nil, nil) - cmEnv1 := generateConfigMap(ctx, k8sClient, "cm-env1", ns, map[string]string{"cm11": "11", "cm12": "12"}) - cmEnv2 := generateConfigMap(ctx, k8sClient, "cm-env2", ns, map[string]string{"cm21": "21", "cm22": "22"}) + cmEnv1 := generateConfigMap(ctx, k8sClient, "cm-env1", ns, map[string]string{"cm11": "11", "cm12": "12"}, nil, nil) + cmEnv2 := generateConfigMap(ctx, k8sClient, "cm-env2", ns, map[string]string{"cm21": "21", "cm22": "22"}, nil, nil) - secretEnv1 := generateSecret(ctx, k8sClient, "secret-env1", 
ns, []string{"sec11", "sec12"}) - _ = generateSecret(ctx, k8sClient, "secret-env2", ns, []string{"sec21", "sec22"}) + secretEnv1 := generateSecret(ctx, k8sClient, "secret-env1", ns, map[string]string{"sec11": "val11", "sec12": "val12"}, nil, nil) + _ = generateSecret(ctx, k8sClient, "secret-env2", ns, map[string]string{"sec21": "val21", "sec22": "val22"}, nil, nil) - bs := bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - AppConfig: &bsv1alpha1.AppConfig{ + bs := bsv1.BackstageSpec{ + Application: &bsv1.Application{ + AppConfig: &bsv1.AppConfig{ MountPath: "/my/mount/path", - ConfigMaps: []bsv1alpha1.ObjectKeyRef{ + ConfigMaps: []bsv1.ObjectKeyRef{ {Name: appConfig1}, {Name: appConfig2, Key: "key21"}, + {Name: appConfig3}, }, }, - //DynamicPluginsConfigMapName: "", - ExtraFiles: &bsv1alpha1.ExtraFiles{ + ExtraFiles: &bsv1.ExtraFiles{ MountPath: "/my/file/path", - ConfigMaps: []bsv1alpha1.ObjectKeyRef{ + ConfigMaps: []bsv1.ObjectKeyRef{ {Name: cmFile1}, {Name: cmFile2, Key: "cm21"}, + {Name: cmFile3}, }, - Secrets: []bsv1alpha1.ObjectKeyRef{ + Secrets: []bsv1.ObjectKeyRef{ {Name: secretFile1, Key: "sec11"}, {Name: secretFile2, Key: "sec21"}, + {Name: secretFile3, Key: "sec.31"}, }, }, - ExtraEnvs: &bsv1alpha1.ExtraEnvs{ - ConfigMaps: []bsv1alpha1.ObjectKeyRef{ + ExtraEnvs: &bsv1.ExtraEnvs{ + ConfigMaps: []bsv1.ObjectKeyRef{ {Name: cmEnv1}, {Name: cmEnv2, Key: "cm21"}, }, - Secrets: []bsv1alpha1.ObjectKeyRef{ + Secrets: []bsv1.ObjectKeyRef{ {Name: secretEnv1, Key: "sec11"}, }, - Envs: []bsv1alpha1.Env{ + Envs: []bsv1.Env{ {Name: "env1", Value: "val1"}, }, }, }, } - backstageName := createAndReconcileBackstage(ctx, ns, bs) + backstageName := createAndReconcileBackstage(ctx, ns, bs, "") Eventually(func(g Gomega) { deploy := &appsv1.Deployment{} @@ -121,38 +124,45 @@ var _ = When("create backstage with CR configured", func() { By("checking if app-config volumes are added to PodSpec") 
g.Expect(utils.GenerateVolumeNameFromCmOrSecret(appConfig1)).To(BeAddedAsVolumeToPodSpec(podSpec)) g.Expect(utils.GenerateVolumeNameFromCmOrSecret(appConfig2)).To(BeAddedAsVolumeToPodSpec(podSpec)) + g.Expect(utils.GenerateVolumeNameFromCmOrSecret(appConfig3)).To(BeAddedAsVolumeToPodSpec(podSpec)) By("checking if app-config volumes are mounted to the Backstage container") g.Expect("/my/mount/path/key11").To(BeMountedToContainer(c)) g.Expect("/my/mount/path/key12").To(BeMountedToContainer(c)) g.Expect("/my/mount/path/key21").To(BeMountedToContainer(c)) g.Expect("/my/mount/path/key22").NotTo(BeMountedToContainer(c)) + g.Expect("/my/mount/path/key.31").To(BeMountedToContainer(c)) By("checking if app-config args are added to the Backstage container") g.Expect("/my/mount/path/key11").To(BeAddedAsArgToContainer(c)) g.Expect("/my/mount/path/key12").To(BeAddedAsArgToContainer(c)) g.Expect("/my/mount/path/key21").To(BeAddedAsArgToContainer(c)) g.Expect("/my/mount/path/key22").NotTo(BeAddedAsArgToContainer(c)) + g.Expect("/my/mount/path/key.31").To(BeAddedAsArgToContainer(c)) By("checking if extra-cm-file volumes are added to PodSpec") g.Expect(utils.GenerateVolumeNameFromCmOrSecret(cmFile1)).To(BeAddedAsVolumeToPodSpec(podSpec)) g.Expect(utils.GenerateVolumeNameFromCmOrSecret(cmFile2)).To(BeAddedAsVolumeToPodSpec(podSpec)) + g.Expect(utils.GenerateVolumeNameFromCmOrSecret(cmFile3)).To(BeAddedAsVolumeToPodSpec(podSpec)) By("checking if extra-cm-file volumes are mounted to the Backstage container") g.Expect("/my/file/path/cm11").To(BeMountedToContainer(c)) g.Expect("/my/file/path/cm12").To(BeMountedToContainer(c)) g.Expect("/my/file/path/cm21").To(BeMountedToContainer(c)) g.Expect("/my/file/path/cm22").NotTo(BeMountedToContainer(c)) + g.Expect("/my/file/path/cm.31").To(BeMountedToContainer(c)) By("checking if extra-secret-file volumes are added to PodSpec") g.Expect(utils.GenerateVolumeNameFromCmOrSecret("secret-file1")).To(BeAddedAsVolumeToPodSpec(podSpec)) 
g.Expect(utils.GenerateVolumeNameFromCmOrSecret("secret-file2")).To(BeAddedAsVolumeToPodSpec(podSpec)) + g.Expect(utils.GenerateVolumeNameFromCmOrSecret("secret-file3.dot")).To(BeAddedAsVolumeToPodSpec(podSpec)) By("checking if extra-secret-file volumes are mounted to the Backstage container") g.Expect("/my/file/path/sec11").To(BeMountedToContainer(c)) g.Expect("/my/file/path/sec12").NotTo(BeMountedToContainer(c)) g.Expect("/my/file/path/sec21").To(BeMountedToContainer(c)) g.Expect("/my/file/path/sec22").NotTo(BeMountedToContainer(c)) + g.Expect("/my/file/path/sec.31").To(BeMountedToContainer(c)) By("checking if extra-envvars are injected to the Backstage container as EnvFrom") g.Expect("cm-env1").To(BeEnvFromForContainer(c)) @@ -162,17 +172,43 @@ var _ = When("create backstage with CR configured", func() { g.Expect("cm21").To(BeEnvVarForContainer(c)) g.Expect("sec11").To(BeEnvVarForContainer(c)) - for _, cond := range deploy.Status.Conditions { - if cond.Type == "Available" { - g.Expect(cond.Status).To(Equal(corev1.ConditionTrue)) - } - } - }, 5*time.Minute, time.Second).Should(Succeed(), controllerMessage()) + }, time.Minute, time.Second).Should(Succeed(), controllerMessage()) + }) + + It("generates label and annotation", func() { + + appConfig := generateConfigMap(ctx, k8sClient, "app-config1", ns, map[string]string{"key11": "app:", "key12": "app:"}, nil, nil) + + bs := bsv1.BackstageSpec{ + Application: &bsv1.Application{ + AppConfig: &bsv1.AppConfig{ + ConfigMaps: []bsv1.ObjectKeyRef{ + {Name: appConfig}, + }, + }, + }, + } + + backstageName := createAndReconcileBackstage(ctx, ns, bs, "") + Eventually(func(g Gomega) { + + cm := &corev1.ConfigMap{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: appConfig}, cm) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(cm.Labels).To(HaveLen(1)) + g.Expect(cm.Labels[model.ExtConfigSyncLabel]).To(Equal("true")) + + g.Expect(cm.Annotations).To(HaveLen(1)) + 
g.Expect(cm.Annotations[model.BackstageNameAnnotation]).To(Equal(backstageName)) + + }, 10*time.Second, time.Second).Should(Succeed()) + }) It("creates default Backstage and then update CR ", func() { - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{}) + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{}, "") Eventually(func(g Gomega) { By("creating Deployment with replicas=1 by default") @@ -187,10 +223,10 @@ var _ = When("create backstage with CR configured", func() { By("updating Backstage") imageName := "quay.io/my-org/my-awesome-image:1.2.3" ips := []string{"some-image-pull-secret-1", "some-image-pull-secret-2"} - update := &bsv1alpha1.Backstage{} + update := &bsv1.Backstage{} err := k8sClient.Get(ctx, types.NamespacedName{Name: backstageName, Namespace: ns}, update) Expect(err).To(Not(HaveOccurred())) - update.Spec.Application = &bsv1alpha1.Application{} + update.Spec.Application = &bsv1.Application{} update.Spec.Application.Replicas = ptr.To(int32(2)) update.Spec.Application.Image = ptr.To(imageName) update.Spec.Application.ImagePullSecrets = ips @@ -215,95 +251,49 @@ var _ = When("create backstage with CR configured", func() { }) - It("creates default Backstage and then update CR to not to use local DB", func() { - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{}) + It("creates Backstage deployment with spec.deployment ", func() { - Eventually(func(g Gomega) { - By("creating Deployment with database.enableLocalDb=true by default") + bs2 := &bsv1.Backstage{} - err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, &appsv1.StatefulSet{}) - g.Expect(err).To(Not(HaveOccurred())) + err := utils.ReadYamlFile("testdata/spec-deployment.yaml", bs2) + Expect(err).To(Not(HaveOccurred())) - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, &corev1.Service{}) - 
g.Expect(err).To(Not(HaveOccurred())) + backstageName := createAndReconcileBackstage(ctx, ns, bs2.Spec, "") - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, &corev1.Secret{}) + Eventually(func(g Gomega) { + By("creating Deployment ") + deploy := &appsv1.Deployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, deploy) g.Expect(err).To(Not(HaveOccurred())) + var bscontainer corev1.Container + for _, c := range deploy.Spec.Template.Spec.Containers { - }, time.Minute, time.Second).Should(Succeed()) - - By("updating Backstage") - update := &bsv1alpha1.Backstage{} - err := k8sClient.Get(ctx, types.NamespacedName{Name: backstageName, Namespace: ns}, update) - Expect(err).To(Not(HaveOccurred())) - update.Spec.Database = &bsv1alpha1.Database{} - update.Spec.Database.EnableLocalDb = ptr.To(false) - err = k8sClient.Update(ctx, update) - Expect(err).To(Not(HaveOccurred())) - _, err = NewTestBackstageReconciler(ns).ReconcileAny(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: backstageName, Namespace: ns}, - }) - Expect(err).To(Not(HaveOccurred())) + if c.Name == "backstage-backend" { + bscontainer = c + break + } + } - Eventually(func(g Gomega) { - By("deleting Local Db StatefulSet, Service and Secret") - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, &appsv1.StatefulSet{}) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.IsNotFound(err)) - - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbServiceName(backstageName)}, &corev1.Service{}) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.IsNotFound(err)) - - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbSecretDefaultName(backstageName)}, &corev1.Secret{}) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.IsNotFound(err)) - }, time.Minute, 
time.Second).Should(Succeed()) + g.Expect(bscontainer).NotTo(BeNil()) + g.Expect(bscontainer.Image).To(HaveValue(Equal("busybox"))) - }) + var bsvolume corev1.Volume + for _, v := range deploy.Spec.Template.Spec.Volumes { - It("creates Backstage with disabled local DB and secret", func() { - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ - EnableLocalDb: ptr.To(false), - AuthSecretName: "existing-secret", - }, - }) + if v.Name == "dynamic-plugins-root" { + bsvolume = v + break + } + } - Eventually(func(g Gomega) { - By("not creating a StatefulSet for the Database") - err := k8sClient.Get(ctx, - types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, - &appsv1.StatefulSet{}) - g.Expect(err).Should(HaveOccurred()) - g.Expect(errors.IsNotFound(err)) - - By("Checking if Deployment was successfully created in the reconciliation") - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, &appsv1.Deployment{}) - g.Expect(err).Should(Not(HaveOccurred())) - }, time.Minute, time.Second).Should(Succeed()) - }) + g.Expect(bsvolume).NotTo(BeNil()) + g.Expect(bsvolume.Ephemeral).NotTo(BeNil()) + g.Expect(*bsvolume.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName).To(Equal("special")) - It("creates Backstage with disabled local DB no secret", func() { - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ - EnableLocalDb: ptr.To(false), - }, - }) + }, 10*time.Second, time.Second).Should(Succeed()) - Eventually(func(g Gomega) { - By("not creating a StatefulSet for the Database") - err := k8sClient.Get(ctx, - types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, - &appsv1.StatefulSet{}) - g.Expect(err).Should(HaveOccurred()) - g.Expect(errors.IsNotFound(err)) - - By("Checking if Deployment was successfully created in the reconciliation") - err = 
k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, &appsv1.Deployment{}) - g.Expect(err).Should(Not(HaveOccurred())) - }, time.Minute, time.Second).Should(Succeed()) }) + }) // Duplicated files in different CMs diff --git a/integration_tests/db_test.go b/integration_tests/db_test.go new file mode 100644 index 00000000..ffdaff6d --- /dev/null +++ b/integration_tests/db_test.go @@ -0,0 +1,148 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package integration_tests + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/utils/ptr" + + corev1 "k8s.io/api/core/v1" + + appsv1 "k8s.io/api/apps/v1" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "redhat-developer/red-hat-developer-hub-operator/pkg/model" + + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" + + "k8s.io/apimachinery/pkg/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = When("create backstage with CR configured", func() { + + var ( + ctx context.Context + ns string + ) + + BeforeEach(func() { + ctx = context.Background() + ns = createNamespace(ctx) + }) + + AfterEach(func() { + deleteNamespace(ctx, ns) + }) + + It("creates default Backstage and then update CR to not to use local DB", func() { + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{}, "") + + Eventually(func(g Gomega) { + By("creating Deployment with database.enableLocalDb=true by default") + + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, &appsv1.StatefulSet{}) + g.Expect(err).To(Not(HaveOccurred())) + + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, &corev1.Service{}) + g.Expect(err).To(Not(HaveOccurred())) + + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-secret-%s", backstageName)}, &corev1.Secret{}) + g.Expect(err).To(Not(HaveOccurred())) + + }, time.Minute, time.Second).Should(Succeed()) + + By("updating Backstage") + update := &bsv1.Backstage{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: backstageName, Namespace: ns}, update) + Expect(err).To(Not(HaveOccurred())) + update.Spec.Database = &bsv1.Database{} + update.Spec.Database.EnableLocalDb = ptr.To(false) + err = k8sClient.Update(ctx, update) + Expect(err).To(Not(HaveOccurred())) + _, err = NewTestBackstageReconciler(ns).ReconcileAny(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{Name: backstageName, Namespace: ns}, + }) + Expect(err).To(Not(HaveOccurred())) + + Eventually(func(g Gomega) { + By("deleting Local Db StatefulSet, Service and Secret") + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, &appsv1.StatefulSet{}) + g.Expect(err).To(HaveOccurred()) + 
g.Expect(errors.IsNotFound(err)) + + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, &corev1.Service{}) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.IsNotFound(err)) + + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-secret-%s", backstageName)}, &corev1.Secret{}) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.IsNotFound(err)) + }, time.Minute, time.Second).Should(Succeed()) + + }) + + It("creates Backstage with disabled local DB and secret", func() { + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{ + Database: &bsv1.Database{ + EnableLocalDb: ptr.To(false), + AuthSecretName: "existing-secret", + }, + }, "") + + Eventually(func(g Gomega) { + By("not creating a StatefulSet for the Database") + err := k8sClient.Get(ctx, + types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, + &appsv1.StatefulSet{}) + g.Expect(err).Should(HaveOccurred()) + g.Expect(errors.IsNotFound(err)) + + By("Checking if Deployment was successfully created in the reconciliation") + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, &appsv1.Deployment{}) + g.Expect(err).Should(Not(HaveOccurred())) + }, time.Minute, time.Second).Should(Succeed()) + }) + + It("creates Backstage with disabled local DB no secret", func() { + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{ + Database: &bsv1.Database{ + EnableLocalDb: ptr.To(false), + }, + }, "") + + Eventually(func(g Gomega) { + By("not creating a StatefulSet for the Database") + err := k8sClient.Get(ctx, + types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, + &appsv1.StatefulSet{}) + g.Expect(err).Should(HaveOccurred()) + g.Expect(errors.IsNotFound(err)) + + By("Checking if Deployment was successfully created in the reconciliation") + err = 
k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, &appsv1.Deployment{}) + g.Expect(err).Should(Not(HaveOccurred())) + }, time.Minute, time.Second).Should(Succeed()) + }) + +}) diff --git a/integration_tests/default-config_test.go b/integration_tests/default-config_test.go index d97e713e..a4e00aa0 100644 --- a/integration_tests/default-config_test.go +++ b/integration_tests/default-config_test.go @@ -16,14 +16,17 @@ package integration_tests import ( "context" - "redhat-developer/red-hat-developer-hub-operator/pkg/utils" + "fmt" "time" - appsv1 "k8s.io/api/apps/v1" + "redhat-developer/red-hat-developer-hub-operator/pkg/utils" "redhat-developer/red-hat-developer-hub-operator/pkg/model" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" corev1 "k8s.io/api/core/v1" @@ -51,12 +54,12 @@ var _ = When("create default backstage", func() { It("creates runtime objects", func() { - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{}) + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{}, "") Eventually(func(g Gomega) { By("creating a secret for accessing the Database") secret := &corev1.Secret{} - secretName := model.DbSecretDefaultName(backstageName) + secretName := fmt.Sprintf("backstage-psql-secret-%s", backstageName) err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: secretName}, secret) g.Expect(err).ShouldNot(HaveOccurred(), controllerMessage()) g.Expect(len(secret.Data)).To(Equal(5)) @@ -64,7 +67,7 @@ var _ = When("create default backstage", func() { By("creating a StatefulSet for the Database") ss := &appsv1.StatefulSet{} - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DbStatefulSetName(backstageName)}, ss) + err = k8sClient.Get(ctx, 
types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, ss) g.Expect(err).ShouldNot(HaveOccurred()) By("injecting default DB Secret as an env var for Db container") @@ -72,7 +75,7 @@ var _ = When("create default backstage", func() { g.Expect(ss.GetOwnerReferences()).To(HaveLen(1)) By("creating a Service for the Database") - err = k8sClient.Get(ctx, types.NamespacedName{Name: model.DbServiceName(backstageName), Namespace: ns}, &corev1.Service{}) + err = k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("backstage-psql-%s", backstageName), Namespace: ns}, &corev1.Service{}) g.Expect(err).To(Not(HaveOccurred())) By("creating Deployment") @@ -95,20 +98,30 @@ var _ = When("create default backstage", func() { g.Expect(utils.GenerateVolumeNameFromCmOrSecret(model.AppConfigDefaultName(backstageName))). To(BeAddedAsVolumeToPodSpec(deploy.Spec.Template.Spec)) - By("setting Backstage status") - bs := &bsv1alpha1.Backstage{} - err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: backstageName}, bs) - g.Expect(err).ShouldNot(HaveOccurred()) - // TODO better matcher for Conditions - g.Expect(bs.Status.Conditions[0].Reason).To(Equal("Deployed")) + }, 5*time.Minute, time.Second).Should(Succeed()) - for _, cond := range deploy.Status.Conditions { - if cond.Type == "Available" { - g.Expect(cond.Status).To(Equal(corev1.ConditionTrue)) - } - } + if *testEnv.UseExistingCluster { + By("setting Backstage status (real cluster only)") + Eventually(func(g Gomega) { - }, 5*time.Minute, time.Second).Should(Succeed()) + bs := &bsv1.Backstage{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: backstageName}, bs) + g.Expect(err).ShouldNot(HaveOccurred()) + + deploy := &appsv1.Deployment{} + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, deploy) + g.Expect(err).ShouldNot(HaveOccurred()) + + // TODO better matcher for Conditions + 
g.Expect(bs.Status.Conditions[0].Reason).To(Equal("Deployed")) + + for _, cond := range deploy.Status.Conditions { + if cond.Type == "Available" { + g.Expect(cond.Status).To(Equal(corev1.ConditionTrue)) + } + } + }, 5*time.Minute, time.Second).Should(Succeed()) + } }) It("creates runtime object using raw configuration ", func() { @@ -116,15 +129,15 @@ var _ = When("create default backstage", func() { bsConf := map[string]string{"deployment.yaml": readTestYamlFile("raw-deployment.yaml")} dbConf := map[string]string{"db-statefulset.yaml": readTestYamlFile("raw-statefulset.yaml")} - bsRaw := generateConfigMap(ctx, k8sClient, "bsraw", ns, bsConf) - dbRaw := generateConfigMap(ctx, k8sClient, "dbraw", ns, dbConf) + bsRaw := generateConfigMap(ctx, k8sClient, "bsraw", ns, bsConf, nil, nil) + dbRaw := generateConfigMap(ctx, k8sClient, "dbraw", ns, dbConf, nil, nil) - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{ - RawRuntimeConfig: &bsv1alpha1.RuntimeConfig{ + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{ + RawRuntimeConfig: &bsv1.RuntimeConfig{ BackstageConfigName: bsRaw, LocalDbConfigName: dbRaw, }, - }) + }, "") Eventually(func(g Gomega) { By("creating Deployment") @@ -137,7 +150,7 @@ var _ = When("create default backstage", func() { By("creating StatefulSet") ss := &appsv1.StatefulSet{} - name := model.DbStatefulSetName(backstageName) + name := fmt.Sprintf("backstage-psql-%s", backstageName) err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, ss) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(ss.Spec.Template.Spec.Containers).To(HaveLen(1)) @@ -146,4 +159,61 @@ var _ = When("create default backstage", func() { }) + It("creates runtime object using raw configuration then updates StatefulSet to replace some immutable fields", func() { + if !*testEnv.UseExistingCluster { + Skip("Real cluster required to assert actual deletion and replacement of resources") + } + + 
rawStatefulSetYamlContent := readTestYamlFile("raw-statefulset.yaml") + dbConf := map[string]string{"db-statefulset.yaml": rawStatefulSetYamlContent} + + dbRaw := generateConfigMap(ctx, k8sClient, "dbraw", ns, dbConf, nil, nil) + + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{ + RawRuntimeConfig: &bsv1.RuntimeConfig{ + LocalDbConfigName: dbRaw, + }, + }, "") + + Eventually(func(g Gomega) { + By("creating Deployment") + deploy := &appsv1.Deployment{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: model.DeploymentName(backstageName)}, deploy) + g.Expect(err).ShouldNot(HaveOccurred()) + + By("creating StatefulSet") + dbStatefulSet := &appsv1.StatefulSet{} + name := fmt.Sprintf("backstage-psql-%s", backstageName) + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, dbStatefulSet) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(dbStatefulSet.Spec.Template.Spec.Containers).To(HaveLen(1)) + g.Expect(dbStatefulSet.Spec.Template.Spec.Containers[0].Image).To(Equal("busybox")) + g.Expect(dbStatefulSet.Spec.PodManagementPolicy).To(Equal(appsv1.ParallelPodManagement)) + }, time.Minute, time.Second).Should(Succeed()) + + By("updating CR to default config") + update := &bsv1.Backstage{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: backstageName, Namespace: ns}, update) + Expect(err).To(Not(HaveOccurred())) + update.Spec.RawRuntimeConfig = nil + err = k8sClient.Update(ctx, update) + Expect(err).To(Not(HaveOccurred())) + + // Patching StatefulSets is done by the reconciler in two passes: first deleting the StatefulSet first, then recreating it in the next reconcilation. 
+ for i := 0; i < 2; i++ { + _, err = NewTestBackstageReconciler(ns).ReconcileAny(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{Name: backstageName, Namespace: ns}, + }) + Expect(err).To(Not(HaveOccurred())) + } + + Eventually(func(g Gomega) { + By("replacing StatefulSet") + dbStatefulSet := &appsv1.StatefulSet{} + err = k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: fmt.Sprintf("backstage-psql-%s", backstageName)}, dbStatefulSet) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(dbStatefulSet.Spec.PodManagementPolicy).To(Equal(appsv1.OrderedReadyPodManagement)) + }, time.Minute, time.Second).Should(Succeed()) + }) + }) diff --git a/integration_tests/rhdh-config_test.go b/integration_tests/rhdh-config_test.go index eabcdf65..18175958 100644 --- a/integration_tests/rhdh-config_test.go +++ b/integration_tests/rhdh-config_test.go @@ -24,7 +24,7 @@ import ( "redhat-developer/red-hat-developer-hub-operator/pkg/model" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -39,7 +39,7 @@ var _ = When("create default backstage", func() { ctx := context.Background() ns := createNamespace(ctx) - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{}) + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{}, "") Eventually(func(g Gomega) { deploy := &appsv1.Deployment{} @@ -54,21 +54,23 @@ var _ = When("create default backstage", func() { g.Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) _, initCont := model.DynamicPluginsInitContainer(deploy.Spec.Template.Spec.InitContainers) //deploy.Spec.Template.Spec.InitContainers[0] - g.Expect(initCont.VolumeMounts).To(HaveLen(3)) + g.Expect(initCont.VolumeMounts).To(HaveLen(4)) g.Expect(initCont.VolumeMounts[0].MountPath).To(Equal("/dynamic-plugins-root")) 
g.Expect(initCont.VolumeMounts[0].SubPath).To(BeEmpty()) g.Expect(initCont.VolumeMounts[1].MountPath).To(Equal("/opt/app-root/src/.npmrc.dynamic-plugins")) g.Expect(initCont.VolumeMounts[1].SubPath).To(Equal(".npmrc")) - g.Expect(initCont.VolumeMounts[2].MountPath).To(Equal("/opt/app-root/src/dynamic-plugins.yaml")) - g.Expect(initCont.VolumeMounts[2].SubPath).To(Equal("dynamic-plugins.yaml")) - g.Expect(initCont.VolumeMounts[2].Name). + g.Expect(initCont.VolumeMounts[2].MountPath).To(Equal("/opt/app-root/src/.npm/_cacache")) + g.Expect(initCont.VolumeMounts[2].SubPath).To(BeEmpty()) + g.Expect(initCont.VolumeMounts[3].MountPath).To(Equal("/opt/app-root/src/dynamic-plugins.yaml")) + g.Expect(initCont.VolumeMounts[3].SubPath).To(Equal("dynamic-plugins.yaml")) + g.Expect(initCont.VolumeMounts[3].Name). To(Equal(utils.GenerateVolumeNameFromCmOrSecret(model.DynamicPluginsDefaultName(backstageName)))) - g.Expect(initCont.VolumeMounts[2].SubPath).To(Equal(model.DynamicPluginsFile)) + g.Expect(initCont.VolumeMounts[3].SubPath).To(Equal(model.DynamicPluginsFile)) g.Expect(initCont.Env[0].Name).To(Equal("NPM_CONFIG_USERCONFIG")) g.Expect(initCont.Env[0].Value).To(Equal("/opt/app-root/src/.npmrc.dynamic-plugins")) - g.Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(4)) + g.Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(5)) g.Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) mainCont := deploy.Spec.Template.Spec.Containers[0] g.Expect(mainCont.Args).To(HaveLen(4)) diff --git a/integration_tests/route_test.go b/integration_tests/route_test.go index 278a63e0..f9f59011 100644 --- a/integration_tests/route_test.go +++ b/integration_tests/route_test.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "k8s.io/apimachinery/pkg/types" @@ -53,18 +53,18 @@ var _ = When("create default backstage", 
func() { Skip("Skipped for non-Openshift cluster") } - backstageName := createAndReconcileBackstage(ctx, ns, bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - Route: &bsv1alpha1.Route{ + backstageName := createAndReconcileBackstage(ctx, ns, bsv1.BackstageSpec{ + Application: &bsv1.Application{ + Route: &bsv1.Route{ //Host: "localhost", //Enabled: ptr.To(true), Subdomain: "test", }, }, - }) + }, "") Eventually(func() error { - found := &bsv1alpha1.Backstage{} + found := &bsv1.Backstage{} return k8sClient.Get(ctx, types.NamespacedName{Name: backstageName, Namespace: ns}, found) }, time.Minute, time.Second).Should(Succeed()) diff --git a/integration_tests/suite_test.go b/integration_tests/suite_test.go index e3f52ef0..a7f6e725 100644 --- a/integration_tests/suite_test.go +++ b/integration_tests/suite_test.go @@ -20,6 +20,8 @@ import ( "os" "strconv" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -48,7 +50,7 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/rand" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -114,7 +116,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = bsv1alpha1.AddToScheme(scheme.Scheme) + err = bsv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) //+kubebuilder:scaffold:scheme @@ -141,9 +143,19 @@ func randString(n int) string { return string(b) } -func createBackstage(ctx context.Context, spec bsv1alpha1.BackstageSpec, ns string) string { - backstageName := "test-backstage-" + randString(5) - err := k8sClient.Create(ctx, &bsv1alpha1.Backstage{ +// generateRandName return random name if name is empty or name itself otherwise +func generateRandName(name string) string { + if name != "" { + return name + } + return "test-backstage-" + randString(5) +} + +func createBackstage(ctx context.Context, spec bsv1.BackstageSpec, ns string, name string) string { + + backstageName := generateRandName(name) + + err := k8sClient.Create(ctx, &bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: backstageName, Namespace: ns, @@ -154,17 +166,25 @@ func createBackstage(ctx context.Context, spec bsv1alpha1.BackstageSpec, ns stri return backstageName } -func createAndReconcileBackstage(ctx context.Context, ns string, spec bsv1alpha1.BackstageSpec) string { - backstageName := createBackstage(ctx, spec, ns) +func createAndReconcileBackstage(ctx context.Context, ns string, spec bsv1.BackstageSpec, name string) string { + backstageName := createBackstage(ctx, spec, ns, name) Eventually(func() error { - found := &bsv1alpha1.Backstage{} + found := &bsv1.Backstage{} return k8sClient.Get(ctx, types.NamespacedName{Name: backstageName, Namespace: ns}, found) }, time.Minute, time.Second).Should(Succeed()) _, err := 
NewTestBackstageReconciler(ns).ReconcileAny(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{Name: backstageName, Namespace: ns}, }) + + if err != nil { + GinkgoWriter.Printf("===> Error detected on Backstage reconcile: %s \n", err.Error()) + if errors.IsAlreadyExists(err) || errors.IsConflict(err) { + return backstageName + } + } + Expect(err).To(Not(HaveOccurred())) return backstageName diff --git a/integration_tests/testdata/raw-statefulset.yaml b/integration_tests/testdata/raw-statefulset.yaml index 9c1090fc..86a406a9 100644 --- a/integration_tests/testdata/raw-statefulset.yaml +++ b/integration_tests/testdata/raw-statefulset.yaml @@ -4,6 +4,7 @@ metadata: name: db-statefulset spec: serviceName: "" + podManagementPolicy: Parallel replicas: 1 selector: matchLabels: diff --git a/integration_tests/testdata/spec-deployment.yaml b/integration_tests/testdata/spec-deployment.yaml new file mode 100644 index 00000000..bd1b1e9c --- /dev/null +++ b/integration_tests/testdata/spec-deployment.yaml @@ -0,0 +1,17 @@ +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +spec: + deployment: + patch: + spec: + template: + spec: + containers: + - name: backstage-backend + image: busybox + volumes: + - ephemeral: + volumeClaimTemplate: + spec: + storageClassName: "special" + name: dynamic-plugins-root \ No newline at end of file diff --git a/integration_tests/utils.go b/integration_tests/utils.go index 6dcfd858..1d99443e 100644 --- a/integration_tests/utils.go +++ b/integration_tests/utils.go @@ -15,11 +15,17 @@ package integration_tests import ( + "bytes" "context" "fmt" "os" "path/filepath" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,11 +34,13 @@ import ( . 
"github.com/onsi/gomega" ) -func generateConfigMap(ctx context.Context, k8sClient client.Client, name, namespace string, data map[string]string) string { +func generateConfigMap(ctx context.Context, k8sClient client.Client, name string, namespace string, data, labels map[string]string, annotations map[string]string) string { Expect(k8sClient.Create(ctx, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, }, Data: data, })).To(Not(HaveOccurred())) @@ -40,15 +48,20 @@ func generateConfigMap(ctx context.Context, k8sClient client.Client, name, names return name } -func generateSecret(ctx context.Context, k8sClient client.Client, name, namespace string, keys []string) string { - data := map[string]string{} - for _, v := range keys { +func generateSecret(ctx context.Context, k8sClient client.Client, name, namespace string, data, labels, annotations map[string]string) string { + if data == nil { + data = map[string]string{} + } + + for _, v := range data { data[v] = fmt.Sprintf("value-%s", v) } Expect(k8sClient.Create(ctx, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, }, StringData: data, })).To(Not(HaveOccurred())) @@ -62,3 +75,48 @@ func readTestYamlFile(name string) string { Expect(err).NotTo(HaveOccurred()) return string(b) } + +func executeRemoteCommand(ctx context.Context, podNamespace, podName, container, command string) (string, string, error) { + kubeCfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{}, + ) + restCfg, err := kubeCfg.ClientConfig() + if err != nil { + return "", "", err + } + coreClient, err := kubernetes.NewForConfig(restCfg) + if err != nil { + return "", "", err + } + + buf := &bytes.Buffer{} + errBuf := 
&bytes.Buffer{} + request := coreClient.CoreV1().RESTClient(). + Post(). + Namespace(podNamespace). + Resource("pods"). + Name(podName). + SubResource("exec"). + VersionedParams(&corev1.PodExecOptions{ + Command: []string{"/bin/sh", "-c", command}, + Container: container, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: true, + }, scheme.ParameterCodec) + exec, err := remotecommand.NewSPDYExecutor(restCfg, "POST", request.URL()) + if err != nil { + return "", "", fmt.Errorf("%w failed creating executor %s on %v/%v", err, command, podNamespace, podName) + } + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdout: buf, + Stderr: errBuf, + }) + if err != nil { + return "", "", fmt.Errorf("%w Failed executing command %s on %v/%v", err, command, podNamespace, podName) + } + + return buf.String(), errBuf.String(), nil +} diff --git a/main.go b/main.go index 7a954211..abfc1831 100644 --- a/main.go +++ b/main.go @@ -34,7 +34,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - backstageiov1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + backstageiov1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" controller "redhat-developer/red-hat-developer-hub-operator/controllers" openshift "github.com/openshift/api/route/v1" diff --git a/pkg/model/appconfig.go b/pkg/model/appconfig.go index 93eaa0e9..9b544e12 100644 --- a/pkg/model/appconfig.go +++ b/pkg/model/appconfig.go @@ -19,7 +19,7 @@ import ( appsv1 "k8s.io/api/apps/v1" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -49,7 +49,7 @@ func AppConfigDefaultName(backstageName string) string { return utils.GenerateRuntimeObjectName(backstageName, "backstage-appconfig") } -func addAppConfigs(spec 
bsv1alpha1.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) { +func addAppConfigs(spec bsv1.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) { if spec.Application == nil || spec.Application.AppConfig == nil || spec.Application.AppConfig.ConfigMaps == nil { return @@ -89,7 +89,7 @@ func (b *AppConfig) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (b *AppConfig) addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (bool, error) { +func (b *AppConfig) addToModel(model *BackstageModel, _ bsv1.Backstage) (bool, error) { if b.ConfigMap != nil { model.setRuntimeObject(b) return true, nil @@ -98,7 +98,7 @@ func (b *AppConfig) addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (b } // implementation of RuntimeObject interface -func (b *AppConfig) validate(_ *BackstageModel, _ bsv1alpha1.Backstage) error { +func (b *AppConfig) validate(_ *BackstageModel, _ bsv1.Backstage) error { return nil } diff --git a/pkg/model/appconfig_test.go b/pkg/model/appconfig_test.go index 45ac7ead..da0b3d7e 100644 --- a/pkg/model/appconfig_test.go +++ b/pkg/model/appconfig_test.go @@ -19,7 +19,7 @@ import ( "redhat-developer/red-hat-developer-hub-operator/pkg/utils" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" corev1 "k8s.io/api/core/v1" @@ -54,16 +54,16 @@ var ( Data: map[string]string{"conf31.yaml": "", "conf32.yaml": ""}, } - appConfigTestBackstage = bsv1alpha1.Backstage{ + appConfigTestBackstage = bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - AppConfig: &bsv1alpha1.AppConfig{ + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + AppConfig: &bsv1.AppConfig{ MountPath: "/my/path", - ConfigMaps: []bsv1alpha1.ObjectKeyRef{}, + ConfigMaps: []bsv1.ObjectKeyRef{}, }, }, }, @@ -97,11 +97,11 @@ 
func TestSpecifiedAppConfig(t *testing.T) { bs := *appConfigTestBackstage.DeepCopy() bs.Spec.Application.AppConfig.ConfigMaps = append(bs.Spec.Application.AppConfig.ConfigMaps, - bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm.Name}) + bsv1.ObjectKeyRef{Name: appConfigTestCm.Name}) bs.Spec.Application.AppConfig.ConfigMaps = append(bs.Spec.Application.AppConfig.ConfigMaps, - bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm2.Name}) + bsv1.ObjectKeyRef{Name: appConfigTestCm2.Name}) bs.Spec.Application.AppConfig.ConfigMaps = append(bs.Spec.Application.AppConfig.ConfigMaps, - bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm3.Name, Key: "conf31.yaml"}) + bsv1.ObjectKeyRef{Name: appConfigTestCm3.Name, Key: "conf31.yaml"}) testObj := createBackstageTest(bs).withDefaultConfig(true) testObj.externalConfig.AppConfigs = map[string]corev1.ConfigMap{appConfigTestCm.Name: appConfigTestCm, appConfigTestCm2.Name: appConfigTestCm2, @@ -127,7 +127,7 @@ func TestDefaultAndSpecifiedAppConfig(t *testing.T) { bs := *appConfigTestBackstage.DeepCopy() cms := &bs.Spec.Application.AppConfig.ConfigMaps - *cms = append(*cms, bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm.Name}) + *cms = append(*cms, bsv1.ObjectKeyRef{Name: appConfigTestCm.Name}) testObj := createBackstageTest(bs).withDefaultConfig(true).addToDefaultConfig("app-config.yaml", "raw-app-config.yaml") @@ -146,16 +146,8 @@ func TestDefaultAndSpecifiedAppConfig(t *testing.T) { assert.Equal(t, 4, len(deployment.deployment.Spec.Template.Spec.Containers[0].Args)) assert.Equal(t, 2, len(deployment.deployment.Spec.Template.Spec.Volumes)) - //assert.Equal(t, filepath.Dir(deployment.deployment.Spec.Template.Spec.Containers[0].Args[1]), - // deployment.deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) - - // it should be valid assertion using Volumes and VolumeMounts indexes since the order of adding is from default to specified - //assert.Equal(t, 
utils.GenerateVolumeNameFromCmOrSecret()deployment.deployment.Spec.Template.Spec.Volumes[0].Name assert.Equal(t, deployment.deployment.Spec.Template.Spec.Volumes[0].Name, deployment.deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) - //t.Log(">>>>>>>>>>>>>>>>", ) - //t.Log(">>>>>>>>>>>>>>>>", ) - } diff --git a/pkg/model/configmapenvs.go b/pkg/model/configmapenvs.go index 205aaa13..6900da9a 100644 --- a/pkg/model/configmapenvs.go +++ b/pkg/model/configmapenvs.go @@ -15,7 +15,7 @@ package model import ( - "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" appsv1 "k8s.io/api/apps/v1" @@ -38,7 +38,7 @@ func init() { registerConfig("configmap-envs.yaml", ConfigMapEnvsFactory{}) } -func addConfigMapEnvs(spec v1alpha1.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) { +func addConfigMapEnvs(spec v1alpha2.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) { if spec.Application == nil || spec.Application.ExtraEnvs == nil || spec.Application.ExtraEnvs.ConfigMaps == nil { return @@ -72,7 +72,7 @@ func (p *ConfigMapEnvs) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (p *ConfigMapEnvs) addToModel(model *BackstageModel, _ v1alpha1.Backstage) (bool, error) { +func (p *ConfigMapEnvs) addToModel(model *BackstageModel, _ v1alpha2.Backstage) (bool, error) { if p.ConfigMap != nil { model.setRuntimeObject(p) return true, nil @@ -81,7 +81,7 @@ func (p *ConfigMapEnvs) addToModel(model *BackstageModel, _ v1alpha1.Backstage) } // implementation of RuntimeObject interface -func (p *ConfigMapEnvs) validate(_ *BackstageModel, _ v1alpha1.Backstage) error { +func (p *ConfigMapEnvs) validate(_ *BackstageModel, _ v1alpha2.Backstage) error { return nil } diff --git a/pkg/model/configmapenvs_test.go b/pkg/model/configmapenvs_test.go index aa3b4846..da62e10f 100644 --- 
a/pkg/model/configmapenvs_test.go +++ b/pkg/model/configmapenvs_test.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,13 +31,13 @@ import ( func TestDefaultConfigMapEnvFrom(t *testing.T) { - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ + Spec: bsv1.BackstageSpec{ + Database: &bsv1.Database{ EnableLocalDb: ptr.To(false), }, }, @@ -60,22 +60,22 @@ func TestDefaultConfigMapEnvFrom(t *testing.T) { func TestSpecifiedConfigMapEnvs(t *testing.T) { - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - ExtraEnvs: &bsv1alpha1.ExtraEnvs{ - ConfigMaps: []bsv1alpha1.ObjectKeyRef{}, + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + ExtraEnvs: &bsv1.ExtraEnvs{ + ConfigMaps: []bsv1.ObjectKeyRef{}, }, }, }, } bs.Spec.Application.ExtraEnvs.ConfigMaps = append(bs.Spec.Application.ExtraEnvs.ConfigMaps, - bsv1alpha1.ObjectKeyRef{Name: "mapName", Key: "ENV1"}) + bsv1.ObjectKeyRef{Name: "mapName", Key: "ENV1"}) testObj := createBackstageTest(bs).withDefaultConfig(true) testObj.externalConfig.ExtraEnvConfigMaps["mapName"] = corev1.ConfigMap{Data: map[string]string{"mapName": "ENV1"}} diff --git a/pkg/model/configmapfiles.go b/pkg/model/configmapfiles.go index 1977987a..78c17efa 100644 --- a/pkg/model/configmapfiles.go +++ b/pkg/model/configmapfiles.go @@ -17,7 +17,7 @@ package model import ( appsv1 "k8s.io/api/apps/v1" - "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" 
"redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -40,7 +40,7 @@ func init() { registerConfig("configmap-files.yaml", ConfigMapFilesFactory{}) } -func addConfigMapFiles(spec v1alpha1.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) { +func addConfigMapFiles(spec v1alpha2.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) { if spec.Application == nil || spec.Application.ExtraFiles == nil || spec.Application.ExtraFiles.ConfigMaps == nil { return @@ -80,7 +80,7 @@ func (p *ConfigMapFiles) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (p *ConfigMapFiles) addToModel(model *BackstageModel, _ v1alpha1.Backstage) (bool, error) { +func (p *ConfigMapFiles) addToModel(model *BackstageModel, _ v1alpha2.Backstage) (bool, error) { if p.ConfigMap != nil { model.setRuntimeObject(p) return true, nil @@ -89,7 +89,7 @@ func (p *ConfigMapFiles) addToModel(model *BackstageModel, _ v1alpha1.Backstage) } // implementation of RuntimeObject interface -func (p *ConfigMapFiles) validate(_ *BackstageModel, _ v1alpha1.Backstage) error { +func (p *ConfigMapFiles) validate(_ *BackstageModel, _ v1alpha2.Backstage) error { return nil } diff --git a/pkg/model/configmapfiles_test.go b/pkg/model/configmapfiles_test.go index 9dedb06a..2f0d711c 100644 --- a/pkg/model/configmapfiles_test.go +++ b/pkg/model/configmapfiles_test.go @@ -17,7 +17,7 @@ package model import ( "context" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,16 +43,16 @@ var ( // Data: map[string]string{"conf2.yaml": ""}, //} - configMapFilesTestBackstage = bsv1alpha1.Backstage{ + configMapFilesTestBackstage = bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - 
ExtraFiles: &bsv1alpha1.ExtraFiles{ + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + ExtraFiles: &bsv1.ExtraFiles{ MountPath: "/my/path", - ConfigMaps: []bsv1alpha1.ObjectKeyRef{}, + ConfigMaps: []bsv1.ObjectKeyRef{}, }, }, }, @@ -81,8 +81,8 @@ func TestSpecifiedConfigMapFiles(t *testing.T) { bs := *configMapFilesTestBackstage.DeepCopy() cmf := &bs.Spec.Application.ExtraFiles.ConfigMaps - *cmf = append(*cmf, bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm.Name}) - *cmf = append(*cmf, bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm2.Name}) + *cmf = append(*cmf, bsv1.ObjectKeyRef{Name: appConfigTestCm.Name}) + *cmf = append(*cmf, bsv1.ObjectKeyRef{Name: appConfigTestCm2.Name}) testObj := createBackstageTest(bs).withDefaultConfig(true) @@ -104,7 +104,7 @@ func TestDefaultAndSpecifiedConfigMapFiles(t *testing.T) { bs := *configMapFilesTestBackstage.DeepCopy() cmf := &bs.Spec.Application.ExtraFiles.ConfigMaps - *cmf = append(*cmf, bsv1alpha1.ObjectKeyRef{Name: appConfigTestCm.Name}) + *cmf = append(*cmf, bsv1.ObjectKeyRef{Name: appConfigTestCm.Name}) testObj := createBackstageTest(bs).withDefaultConfig(true).addToDefaultConfig("configmap-files.yaml", "raw-cm-files.yaml") diff --git a/pkg/model/db-secret.go b/pkg/model/db-secret.go index aff1e278..99c8fe31 100644 --- a/pkg/model/db-secret.go +++ b/pkg/model/db-secret.go @@ -17,7 +17,7 @@ package model import ( "strconv" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -39,7 +39,7 @@ func init() { } func DbSecretDefaultName(backstageName string) string { - return utils.GenerateRuntimeObjectName(backstageName, "backstage-db") + return utils.GenerateRuntimeObjectName(backstageName, "backstage-psql-secret") } // implementation of RuntimeObject interface @@ -56,7 +56,7 @@ func (b *DbSecret) setObject(obj client.Object) { } // 
implementation of RuntimeObject interface -func (b *DbSecret) addToModel(model *BackstageModel, backstage bsv1alpha1.Backstage) (bool, error) { +func (b *DbSecret) addToModel(model *BackstageModel, backstage bsv1.Backstage) (bool, error) { // do not add if specified if backstage.Spec.IsAuthSecretSpecified() { @@ -78,7 +78,7 @@ func (b *DbSecret) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (b *DbSecret) validate(model *BackstageModel, backstage bsv1alpha1.Backstage) error { +func (b *DbSecret) validate(model *BackstageModel, backstage bsv1.Backstage) error { pswd, _ := utils.GeneratePassword(24) diff --git a/pkg/model/db-secret_test.go b/pkg/model/db-secret_test.go index 2b8e1829..3d292a8b 100644 --- a/pkg/model/db-secret_test.go +++ b/pkg/model/db-secret_test.go @@ -16,24 +16,25 @@ package model import ( "context" + "fmt" "testing" "k8s.io/utils/ptr" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/stretchr/testify/assert" ) -var dbSecretBackstage = &bsv1alpha1.Backstage{ +var dbSecretBackstage = &bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ + Spec: bsv1.BackstageSpec{ + Database: &bsv1.Database{ EnableLocalDb: ptr.To(false), }, }, @@ -50,7 +51,7 @@ func TestEmptyDbSecret(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, model.LocalDbSecret) - assert.Equal(t, DbSecretDefaultName(bs.Name), model.LocalDbSecret.secret.Name) + assert.Equal(t, fmt.Sprintf("backstage-psql-secret-%s", bs.Name), model.LocalDbSecret.secret.Name) dbss := model.localDbStatefulSet assert.NotNil(t, dbss) @@ -68,7 +69,7 @@ func TestDefaultWithGeneratedSecrets(t *testing.T) { model, err := InitObjects(context.TODO(), bs, testObj.externalConfig, true, false, testObj.scheme) assert.NoError(t, err) - 
assert.Equal(t, DbSecretDefaultName(bs.Name), model.LocalDbSecret.secret.Name) + assert.Equal(t, fmt.Sprintf("backstage-psql-secret-%s", bs.Name), model.LocalDbSecret.secret.Name) //should be generated // assert.NotEmpty(t, model.LocalDbSecret.secret.StringData["POSTGRES_USER"]) // assert.NotEmpty(t, model.LocalDbSecret.secret.StringData["POSTGRES_PASSWORD"]) diff --git a/pkg/model/db-service.go b/pkg/model/db-service.go index d6988565..ba42a237 100644 --- a/pkg/model/db-service.go +++ b/pkg/model/db-service.go @@ -17,7 +17,7 @@ package model import ( "fmt" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -27,7 +27,7 @@ import ( type DbServiceFactory struct{} func (f DbServiceFactory) newBackstageObject() RuntimeObject { - return &DbService{ /*service: &corev1.Service{}*/ } + return &DbService{} } type DbService struct { @@ -39,7 +39,7 @@ func init() { } func DbServiceName(backstageName string) string { - return utils.GenerateRuntimeObjectName(backstageName, "backstage-db") + return utils.GenerateRuntimeObjectName(backstageName, "backstage-psql") } // implementation of RuntimeObject interface @@ -55,7 +55,7 @@ func (b *DbService) setObject(obj client.Object) { } // implementation of RuntimeObject interface -func (b *DbService) addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (bool, error) { +func (b *DbService) addToModel(model *BackstageModel, _ bsv1.Backstage) (bool, error) { if b.service == nil { if model.localDbEnabled { return false, fmt.Errorf("LocalDb Service not initialized, make sure there is db-service.yaml.yaml in default or raw configuration") @@ -67,6 +67,9 @@ func (b *DbService) addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (b } } + // force this service to be headless even if it is not set in the original config + b.service.Spec.ClusterIP = 
corev1.ClusterIPNone + model.LocalDbService = b model.setRuntimeObject(b) @@ -79,11 +82,11 @@ func (b *DbService) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (b *DbService) validate(_ *BackstageModel, _ bsv1alpha1.Backstage) error { +func (b *DbService) validate(_ *BackstageModel, _ bsv1.Backstage) error { return nil } func (b *DbService) setMetaInfo(backstageName string) { b.service.SetName(DbServiceName(backstageName)) - utils.GenerateLabel(&b.service.Spec.Selector, BackstageAppLabel, fmt.Sprintf("backstage-db-%s", backstageName)) + utils.GenerateLabel(&b.service.Spec.Selector, BackstageAppLabel, utils.BackstageDbAppLabelValue(backstageName)) } diff --git a/pkg/model/db-statefulset.go b/pkg/model/db-statefulset.go index 4a5a733b..4e6a3dae 100644 --- a/pkg/model/db-statefulset.go +++ b/pkg/model/db-statefulset.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" appsv1 "k8s.io/api/apps/v1" @@ -44,7 +44,7 @@ func init() { } func DbStatefulSetName(backstageName string) string { - return utils.GenerateRuntimeObjectName(backstageName, "backstage-db") + return utils.GenerateRuntimeObjectName(backstageName, "backstage-psql") } // implementation of RuntimeObject interface @@ -60,7 +60,7 @@ func (b *DbStatefulSet) setObject(obj client.Object) { } // implementation of RuntimeObject interface -func (b *DbStatefulSet) addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (bool, error) { +func (b *DbStatefulSet) addToModel(model *BackstageModel, _ bsv1.Backstage) (bool, error) { if b.statefulSet == nil { if model.localDbEnabled { return false, fmt.Errorf("LocalDb StatefulSet not configured, make sure there is db-statefulset.yaml.yaml in default or raw configuration") @@ -90,11 +90,15 @@ func (b *DbStatefulSet) EmptyObject() 
client.Object { } // implementation of RuntimeObject interface -func (b *DbStatefulSet) validate(model *BackstageModel, backstage bsv1alpha1.Backstage) error { +func (b *DbStatefulSet) validate(model *BackstageModel, backstage bsv1.Backstage) error { - if backstage.Spec.Application != nil { + // point ServiceName to localDb + b.statefulSet.Spec.ServiceName = model.LocalDbService.service.Name + + if backstage.Spec.Application != nil && backstage.Spec.Application.ImagePullSecrets != nil { utils.SetImagePullSecrets(b.podSpec(), backstage.Spec.Application.ImagePullSecrets) } + if backstage.Spec.IsAuthSecretSpecified() { utils.SetDbSecretEnvVar(b.container(), backstage.Spec.Database.AuthSecretName) } else if model.LocalDbSecret != nil { @@ -105,8 +109,8 @@ func (b *DbStatefulSet) validate(model *BackstageModel, backstage bsv1alpha1.Bac func (b *DbStatefulSet) setMetaInfo(backstageName string) { b.statefulSet.SetName(DbStatefulSetName(backstageName)) - utils.GenerateLabel(&b.statefulSet.Spec.Template.ObjectMeta.Labels, BackstageAppLabel, fmt.Sprintf("backstage-db-%s", backstageName)) - utils.GenerateLabel(&b.statefulSet.Spec.Selector.MatchLabels, BackstageAppLabel, fmt.Sprintf("backstage-db-%s", backstageName)) + utils.GenerateLabel(&b.statefulSet.Spec.Template.ObjectMeta.Labels, BackstageAppLabel, utils.BackstageDbAppLabelValue(backstageName)) + utils.GenerateLabel(&b.statefulSet.Spec.Selector.MatchLabels, BackstageAppLabel, utils.BackstageDbAppLabelValue(backstageName)) } // returns DB container diff --git a/pkg/model/db-statefulset_test.go b/pkg/model/db-statefulset_test.go index 778a0d4c..24ceddac 100644 --- a/pkg/model/db-statefulset_test.go +++ b/pkg/model/db-statefulset_test.go @@ -19,26 +19,40 @@ import ( "os" "testing" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"github.com/stretchr/testify/assert" ) -var dbStatefulSetBackstage = &bsv1alpha1.Backstage{ +var dbStatefulSetBackstage = &bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{}, - Application: &bsv1alpha1.Application{}, + Spec: bsv1.BackstageSpec{ + Database: &bsv1.Database{}, + Application: &bsv1.Application{}, }, } +// test default StatefulSet +func TestDefault(t *testing.T) { + bs := *dbStatefulSetBackstage.DeepCopy() + testObj := createBackstageTest(bs).withDefaultConfig(true) + + model, err := InitObjects(context.TODO(), bs, testObj.externalConfig, true, false, testObj.scheme) + assert.NoError(t, err) + + assert.Equal(t, model.LocalDbService.service.Name, model.localDbStatefulSet.statefulSet.Spec.ServiceName) + assert.Equal(t, corev1.ClusterIPNone, model.LocalDbService.service.Spec.ClusterIP) +} + // It tests the overriding image feature func TestOverrideDbImage(t *testing.T) { bs := *dbStatefulSetBackstage.DeepCopy() diff --git a/pkg/model/deployment.go b/pkg/model/deployment.go index 67c5be79..607c425c 100644 --- a/pkg/model/deployment.go +++ b/pkg/model/deployment.go @@ -18,11 +18,16 @@ import ( "fmt" "os" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/merge2" + + "sigs.k8s.io/yaml" + "k8s.io/utils/ptr" corev1 "k8s.io/api/core/v1" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" @@ -32,6 +37,7 @@ import ( const BackstageImageEnvVar = "RELATED_IMAGE_backstage" const defaultMountDir = "/opt/app-root/src" +const ExtConfigHashAnnotation = "rhdh.redhat.com/ext-config-hash" type BackstageDeploymentFactory struct{} @@ -69,10 +75,20 @@ func (b *BackstageDeployment) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (b *BackstageDeployment) 
addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (bool, error) { +func (b *BackstageDeployment) addToModel(model *BackstageModel, backstage bsv1.Backstage) (bool, error) { if b.deployment == nil { return false, fmt.Errorf("Backstage Deployment is not initialized, make sure there is deployment.yaml in default or raw configuration") } + + if b.deployment.Spec.Template.ObjectMeta.Annotations == nil { + b.deployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} + } + b.deployment.Spec.Template.ObjectMeta.Annotations[ExtConfigHashAnnotation] = model.ExternalConfig.GetHash() + + if err := b.setDeployment(backstage); err != nil { + return false, err + } + model.backstageDeployment = b model.setRuntimeObject(b) @@ -86,14 +102,7 @@ func (b *BackstageDeployment) addToModel(model *BackstageModel, _ bsv1alpha1.Bac } // implementation of RuntimeObject interface -func (b *BackstageDeployment) validate(model *BackstageModel, backstage bsv1alpha1.Backstage) error { - - if backstage.Spec.Application != nil { - b.setReplicas(backstage.Spec.Application.Replicas) - utils.SetImagePullSecrets(b.podSpec(), backstage.Spec.Application.ImagePullSecrets) - b.setImage(backstage.Spec.Application.Image) - b.addExtraEnvs(backstage.Spec.Application.ExtraEnvs) - } +func (b *BackstageDeployment) validate(model *BackstageModel, backstage bsv1.Backstage) error { for _, bso := range model.RuntimeObjects { if bs, ok := bso.(BackstagePodContributor); ok { @@ -130,8 +139,8 @@ func (b *BackstageDeployment) validate(model *BackstageModel, backstage bsv1alph func (b *BackstageDeployment) setMetaInfo(backstageName string) { b.deployment.SetName(DeploymentName(backstageName)) - utils.GenerateLabel(&b.deployment.Spec.Template.ObjectMeta.Labels, BackstageAppLabel, fmt.Sprintf("backstage-%s", backstageName)) - utils.GenerateLabel(&b.deployment.Spec.Selector.MatchLabels, BackstageAppLabel, fmt.Sprintf("backstage-%s", backstageName)) + 
utils.GenerateLabel(&b.deployment.Spec.Template.ObjectMeta.Labels, BackstageAppLabel, utils.BackstageAppLabelValue(backstageName)) + utils.GenerateLabel(&b.deployment.Spec.Selector.MatchLabels, BackstageAppLabel, utils.BackstageAppLabelValue(backstageName)) } func (b *BackstageDeployment) container() *corev1.Container { @@ -142,6 +151,39 @@ func (b *BackstageDeployment) podSpec() *corev1.PodSpec { return &b.deployment.Spec.Template.Spec } +func (b *BackstageDeployment) setDeployment(backstage bsv1.Backstage) error { + + // set from backstage.Spec.Application + if backstage.Spec.Application != nil { + b.setReplicas(backstage.Spec.Application.Replicas) + utils.SetImagePullSecrets(b.podSpec(), backstage.Spec.Application.ImagePullSecrets) + b.setImage(backstage.Spec.Application.Image) + b.addExtraEnvs(backstage.Spec.Application.ExtraEnvs) + } + + // set from backstage.Spec.Deployment + if backstage.Spec.Deployment != nil { + if conf := backstage.Spec.Deployment.Patch; conf != nil { + + deplStr, err := yaml.Marshal(b.deployment) + if err != nil { + return fmt.Errorf("can not marshal deployment object: %w", err) + } + + merged, err := merge2.MergeStrings(string(conf.Raw), string(deplStr), false, kyaml.MergeOptions{}) + if err != nil { + return fmt.Errorf("can not merge spec.deployment: %w", err) + } + + err = yaml.Unmarshal([]byte(merged), b.deployment) + if err != nil { + return fmt.Errorf("can not unmarshal merged deployment: %w", err) + } + } + } + return nil +} + // sets the amount of replicas (used by CR config) func (b *BackstageDeployment) setReplicas(replicas *int32) { if replicas != nil { @@ -167,7 +209,7 @@ func (b *BackstageDeployment) setImage(image *string) { } // adds environment variables to the Backstage Container -func (b *BackstageDeployment) addContainerEnvVar(env bsv1alpha1.Env) { +func (b *BackstageDeployment) addContainerEnvVar(env bsv1.Env) { b.container().Env = append(b.deployment.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ Name: 
env.Name, @@ -176,7 +218,7 @@ func (b *BackstageDeployment) addContainerEnvVar(env bsv1alpha1.Env) { } // adds environment from source to the Backstage Container -func (b *BackstageDeployment) addExtraEnvs(extraEnvs *bsv1alpha1.ExtraEnvs) { +func (b *BackstageDeployment) addExtraEnvs(extraEnvs *bsv1.ExtraEnvs) { if extraEnvs != nil { for _, e := range extraEnvs.Envs { b.addContainerEnvVar(e) diff --git a/pkg/model/deployment_test.go b/pkg/model/deployment_test.go index 261ceb86..f257cbaa 100644 --- a/pkg/model/deployment_test.go +++ b/pkg/model/deployment_test.go @@ -18,25 +18,27 @@ import ( "context" "testing" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/utils/ptr" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/stretchr/testify/assert" ) -var deploymentTestBackstage = bsv1alpha1.Backstage{ +var deploymentTestBackstage = bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ + Spec: bsv1.BackstageSpec{ + Database: &bsv1.Database{ EnableLocalDb: ptr.To(false), }, - Application: &bsv1alpha1.Application{}, + Application: &bsv1.Application{}, }, } @@ -104,3 +106,94 @@ func TestSpecImagePullSecrets(t *testing.T) { assert.Equal(t, 0, len(model.backstageDeployment.deployment.Spec.Template.Spec.ImagePullSecrets)) } + +func TestMergeFromSpecDeployment(t *testing.T) { + bs := *deploymentTestBackstage.DeepCopy() + bs.Spec.Deployment = &bsv1.BackstageDeployment{} + bs.Spec.Deployment.Patch = &apiextensionsv1.JSON{ + Raw: []byte(` +metadata: + labels: + mylabel: java +spec: + template: + metadata: + labels: + pod: backstage + spec: + containers: + - name: sidecar + image: my-image:1.0.0 + - name: backstage-backend + resources: + requests: + cpu: 251m + memory: 257Mi + volumes: + - 
ephemeral: + volumeClaimTemplate: + spec: + storageClassName: "special" + name: dynamic-plugins-root + - emptyDir: + name: my-vol +`), + } + + testObj := createBackstageTest(bs).withDefaultConfig(true). + addToDefaultConfig("deployment.yaml", "janus-deployment.yaml") + + model, err := InitObjects(context.TODO(), bs, testObj.externalConfig, true, true, testObj.scheme) + assert.NoError(t, err) + + // label added + assert.Equal(t, "java", model.backstageDeployment.deployment.Labels["mylabel"]) + assert.Equal(t, "backstage", model.backstageDeployment.deployment.Spec.Template.Labels["pod"]) + + // sidecar added + assert.Equal(t, 2, len(model.backstageDeployment.deployment.Spec.Template.Spec.Containers)) + assert.Equal(t, "sidecar", model.backstageDeployment.deployment.Spec.Template.Spec.Containers[1].Name) + assert.Equal(t, "my-image:1.0.0", model.backstageDeployment.deployment.Spec.Template.Spec.Containers[1].Image) + + // backstage container resources updated + assert.Equal(t, "backstage-backend", model.backstageDeployment.container().Name) + assert.Equal(t, "257Mi", model.backstageDeployment.container().Resources.Requests.Memory().String()) + + // volumes + // dynamic-plugins-root, dynamic-plugins-npmrc, my-vol + assert.Equal(t, 3, len(model.backstageDeployment.deployment.Spec.Template.Spec.Volumes)) + assert.Equal(t, "dynamic-plugins-root", model.backstageDeployment.deployment.Spec.Template.Spec.Volumes[0].Name) + // overrides StorageClassName + assert.Equal(t, "special", *model.backstageDeployment.deployment.Spec.Template.Spec.Volumes[0].Ephemeral.VolumeClaimTemplate.Spec.StorageClassName) + // adds new volume + assert.Equal(t, "my-vol", model.backstageDeployment.deployment.Spec.Template.Spec.Volumes[2].Name) +} + +// to remove when stop supporting v1alpha1 +func TestDeploymentFieldPrevailsOnDeprecated(t *testing.T) { + bs := *deploymentTestBackstage.DeepCopy() + bs.Spec.Application.Image = ptr.To("app-image") + bs.Spec.Application.Replicas = ptr.To(int32(2)) + 
bs.Spec.Deployment = &bsv1.BackstageDeployment{} + bs.Spec.Deployment.Patch = &apiextensionsv1.JSON{ + Raw: []byte(` +spec: + replicas: 3 + template: + spec: + containers: + - name: backstage-backend + image: deployment-image +`), + } + + testObj := createBackstageTest(bs).withDefaultConfig(true). + addToDefaultConfig("deployment.yaml", "janus-deployment.yaml") + + model, err := InitObjects(context.TODO(), bs, testObj.externalConfig, true, true, testObj.scheme) + assert.NoError(t, err) + + assert.Equal(t, "backstage-backend", model.backstageDeployment.container().Name) + assert.Equal(t, "deployment-image", model.backstageDeployment.container().Image) + assert.Equal(t, int32(3), *model.backstageDeployment.deployment.Spec.Replicas) +} diff --git a/pkg/model/dynamic-plugins.go b/pkg/model/dynamic-plugins.go index 6e528a45..f0235ea0 100644 --- a/pkg/model/dynamic-plugins.go +++ b/pkg/model/dynamic-plugins.go @@ -20,7 +20,7 @@ import ( appsv1 "k8s.io/api/apps/v1" - "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -48,17 +48,22 @@ func DynamicPluginsDefaultName(backstageName string) string { return utils.GenerateRuntimeObjectName(backstageName, "backstage-dynamic-plugins") } -func addDynamicPlugins(spec v1alpha1.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) error { +func addDynamicPlugins(spec v1alpha2.BackstageSpec, deployment *appsv1.Deployment, model *BackstageModel) error { if spec.Application == nil || spec.Application.DynamicPluginsConfigMapName == "" { return nil } if _, ic := DynamicPluginsInitContainer(deployment.Spec.Template.Spec.InitContainers); ic == nil { - return fmt.Errorf("deployment validation failed, dynamic plugin name configured but no InitContainer %s defined", dynamicPluginInitContainerName) + return fmt.Errorf("validation failed, dynamic plugin name 
configured but no InitContainer %s defined", dynamicPluginInitContainerName) } dp := DynamicPlugins{ConfigMap: &model.ExternalConfig.DynamicPlugins} + + if dp.ConfigMap.Data == nil || len(dp.ConfigMap.Data) != 1 || dp.ConfigMap.Data[DynamicPluginsFile] == "" { + return fmt.Errorf("dynamic plugin configMap expects exactly one key named '%s' ", DynamicPluginsFile) + } + dp.updatePod(deployment) return nil @@ -83,7 +88,7 @@ func (p *DynamicPlugins) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (p *DynamicPlugins) addToModel(model *BackstageModel, backstage v1alpha1.Backstage) (bool, error) { +func (p *DynamicPlugins) addToModel(model *BackstageModel, backstage v1alpha2.Backstage) (bool, error) { if p.ConfigMap == nil || (backstage.Spec.Application != nil && backstage.Spec.Application.DynamicPluginsConfigMapName != "") { return false, nil @@ -119,7 +124,7 @@ func (p *DynamicPlugins) updatePod(deployment *appsv1.Deployment) { // implementation of RuntimeObject interface // ConfigMap name must be the same as (deployment.yaml).spec.template.spec.volumes.name.dynamic-plugins-conf.ConfigMap.name -func (p *DynamicPlugins) validate(model *BackstageModel, _ v1alpha1.Backstage) error { +func (p *DynamicPlugins) validate(model *BackstageModel, _ v1alpha2.Backstage) error { _, initContainer := DynamicPluginsInitContainer(model.backstageDeployment.deployment.Spec.Template.Spec.InitContainers) if initContainer == nil { diff --git a/pkg/model/dynamic-plugins_test.go b/pkg/model/dynamic-plugins_test.go index 8037c4af..7c4f7351 100644 --- a/pkg/model/dynamic-plugins_test.go +++ b/pkg/model/dynamic-plugins_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/utils/ptr" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,16 +29,16 @@ import ( "github.com/stretchr/testify/assert" ) -var 
testDynamicPluginsBackstage = bsv1alpha1.Backstage{ +var testDynamicPluginsBackstage = bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ + Spec: bsv1.BackstageSpec{ + Database: &bsv1.Database{ EnableLocalDb: ptr.To(false), }, - Application: &bsv1alpha1.Application{}, + Application: &bsv1.Application{}, }, } @@ -56,7 +56,28 @@ func TestDynamicPluginsValidationFailed(t *testing.T) { } -// Janus pecific test +func TestDynamicPluginsInvalidKeyName(t *testing.T) { + bs := testDynamicPluginsBackstage.DeepCopy() + + bs.Spec.Application.DynamicPluginsConfigMapName = "dplugin" + + testObj := createBackstageTest(*bs).withDefaultConfig(true). + addToDefaultConfig("dynamic-plugins.yaml", "raw-dynamic-plugins.yaml"). + addToDefaultConfig("deployment.yaml", "janus-deployment.yaml") + + testObj.externalConfig.DynamicPlugins = corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "dplugin"}, + Data: map[string]string{"WrongKeyName.yml": "tt"}, + } + + _, err := InitObjects(context.TODO(), *bs, testObj.externalConfig, true, false, testObj.scheme) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "expects exactly one key named 'dynamic-plugins.yaml'") + +} + +// Janus specific test func TestDefaultDynamicPlugins(t *testing.T) { bs := testDynamicPluginsBackstage.DeepCopy() @@ -92,7 +113,10 @@ func TestDefaultAndSpecifiedDynamicPlugins(t *testing.T) { addToDefaultConfig("dynamic-plugins.yaml", "raw-dynamic-plugins.yaml"). 
addToDefaultConfig("deployment.yaml", "janus-deployment.yaml") - testObj.externalConfig.DynamicPlugins = corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dplugin"}} + testObj.externalConfig.DynamicPlugins = corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "dplugin"}, + Data: map[string]string{DynamicPluginsFile: "tt"}, + } model, err := InitObjects(context.TODO(), *bs, testObj.externalConfig, true, false, testObj.scheme) @@ -106,7 +130,6 @@ func TestDefaultAndSpecifiedDynamicPlugins(t *testing.T) { //vol-dplugin assert.Equal(t, 3, len(ic.VolumeMounts)) assert.Equal(t, utils.GenerateVolumeNameFromCmOrSecret("dplugin"), ic.VolumeMounts[2].Name) - //t.Log(">>>>>>>>>>>>>>>>", ic.VolumeMounts) } func TestDynamicPluginsFailOnArbitraryDepl(t *testing.T) { diff --git a/pkg/model/externalconfig.go b/pkg/model/externalconfig.go new file mode 100644 index 00000000..13843d76 --- /dev/null +++ b/pkg/model/externalconfig.go @@ -0,0 +1,75 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ExtConfigSyncLabel = "rhdh.redhat.com/ext-config-sync" +const BackstageNameAnnotation = "rhdh.redhat.com/backstage-name" + +type ExternalConfig struct { + RawConfig map[string]string + AppConfigs map[string]corev1.ConfigMap + ExtraFileConfigMaps map[string]corev1.ConfigMap + ExtraFileSecrets map[string]corev1.Secret + ExtraEnvConfigMaps map[string]corev1.ConfigMap + ExtraEnvSecrets map[string]corev1.Secret + DynamicPlugins corev1.ConfigMap + + syncedContent []byte +} + +func NewExternalConfig() ExternalConfig { + + return ExternalConfig{ + RawConfig: map[string]string{}, + AppConfigs: map[string]corev1.ConfigMap{}, + ExtraFileConfigMaps: map[string]corev1.ConfigMap{}, + ExtraFileSecrets: map[string]corev1.Secret{}, + ExtraEnvConfigMaps: map[string]corev1.ConfigMap{}, + ExtraEnvSecrets: map[string]corev1.Secret{}, + DynamicPlugins: corev1.ConfigMap{}, + + syncedContent: []byte{}, + } +} + +func (e *ExternalConfig) GetHash() string { + h := sha256.New() + h.Write([]byte(e.syncedContent)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (e *ExternalConfig) AddToSyncedConfig(content client.Object) error { + + if content.GetLabels()[ExtConfigSyncLabel] == "" || content.GetAnnotations()[BackstageNameAnnotation] == "" { + return nil + } + + d, err := json.Marshal(content) + if err != nil { + return err + } + + e.syncedContent = append(e.syncedContent, d...) 
+ return nil +} diff --git a/pkg/model/interfaces.go b/pkg/model/interfaces.go index 859ce115..cc679590 100644 --- a/pkg/model/interfaces.go +++ b/pkg/model/interfaces.go @@ -15,7 +15,7 @@ package model import ( - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" appsv1 "k8s.io/api/apps/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -45,10 +45,10 @@ type RuntimeObject interface { EmptyObject() client.Object // adds runtime object to the model // returns false if the object was not added to the model (not configured) - addToModel(model *BackstageModel, backstage bsv1alpha1.Backstage) (bool, error) + addToModel(model *BackstageModel, backstage bsv1.Backstage) (bool, error) // at this stage all the information is updated // set the final references validates the object at the end of initialization - validate(model *BackstageModel, backstage bsv1alpha1.Backstage) error + validate(model *BackstageModel, backstage bsv1.Backstage) error // sets object name, labels and other necessary meta information setMetaInfo(backstageName string) } diff --git a/pkg/model/model_tests.go b/pkg/model/model_tests.go index 3948d2c8..7dde3751 100644 --- a/pkg/model/model_tests.go +++ b/pkg/model/model_tests.go @@ -27,22 +27,22 @@ import ( "k8s.io/apimachinery/pkg/runtime" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" ) // testBackstageObject it is a helper object to simplify testing model component allowing to customize and isolate testing configuration // usual sequence of creating testBackstageObject contains such a steps: -// createBackstageTest(bsv1alpha1.Backstage). +// createBackstageTest(bsv1.Backstage). 
// withDefaultConfig(useDef bool) // addToDefaultConfig(key, fileName) type testBackstageObject struct { - backstage bsv1alpha1.Backstage + backstage bsv1.Backstage externalConfig ExternalConfig scheme *runtime.Scheme } // initialises testBackstageObject object -func createBackstageTest(bs bsv1alpha1.Backstage) *testBackstageObject { +func createBackstageTest(bs bsv1.Backstage) *testBackstageObject { ec := ExternalConfig{ RawConfig: map[string]string{}, AppConfigs: map[string]corev1.ConfigMap{}, @@ -50,7 +50,7 @@ func createBackstageTest(bs bsv1alpha1.Backstage) *testBackstageObject { ExtraEnvConfigMaps: map[string]corev1.ConfigMap{}, } b := &testBackstageObject{backstage: bs, externalConfig: ec, scheme: runtime.NewScheme()} - utilruntime.Must(bsv1alpha1.AddToScheme(b.scheme)) + utilruntime.Must(bsv1.AddToScheme(b.scheme)) return b } diff --git a/pkg/model/route.go b/pkg/model/route.go index 56601949..02767af8 100644 --- a/pkg/model/route.go +++ b/pkg/model/route.go @@ -15,7 +15,7 @@ package model import ( - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" openshift "github.com/openshift/api/route/v1" @@ -36,7 +36,7 @@ func RouteName(backstageName string) string { return utils.GenerateRuntimeObjectName(backstageName, "backstage") } -func (b *BackstageRoute) setRoute(specified *bsv1alpha1.Route) { +func (b *BackstageRoute) setRoute(specified *bsv1.Route) { if len(specified.Host) > 0 { b.route.Spec.Host = specified.Host @@ -101,7 +101,7 @@ func (b *BackstageRoute) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (b *BackstageRoute) addToModel(model *BackstageModel, backstage bsv1alpha1.Backstage) (bool, error) { +func (b *BackstageRoute) addToModel(model *BackstageModel, backstage bsv1.Backstage) (bool, error) { // not Openshift if !model.isOpenshift { @@ -137,7 +137,7 @@ func (b 
*BackstageRoute) addToModel(model *BackstageModel, backstage bsv1alpha1. } // implementation of RuntimeObject interface -func (b *BackstageRoute) validate(model *BackstageModel, _ bsv1alpha1.Backstage) error { +func (b *BackstageRoute) validate(model *BackstageModel, _ bsv1.Backstage) error { b.route.Spec.To.Name = model.backstageService.service.Name return nil } diff --git a/pkg/model/route_test.go b/pkg/model/route_test.go index c035af39..9189ce14 100644 --- a/pkg/model/route_test.go +++ b/pkg/model/route_test.go @@ -24,20 +24,20 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "github.com/stretchr/testify/assert" ) func TestDefaultRoute(t *testing.T) { - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "TestSpecifiedRoute", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - Route: &bsv1alpha1.Route{}, + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + Route: &bsv1.Route{}, }, }, } @@ -63,14 +63,14 @@ func TestDefaultRoute(t *testing.T) { } func TestSpecifiedRoute(t *testing.T) { - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "TestSpecifiedRoute", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - Route: &bsv1alpha1.Route{ + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + Route: &bsv1.Route{ Enabled: ptr.To(true), Host: "TestSpecifiedRoute", //TLS: nil, @@ -109,14 +109,14 @@ func TestSpecifiedRoute(t *testing.T) { func TestDisabledRoute(t *testing.T) { // Route.Enabled = false - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "TestSpecifiedRoute", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - Route: 
&bsv1alpha1.Route{ + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + Route: &bsv1.Route{ Enabled: ptr.To(false), Host: "TestSpecifiedRoute", //TLS: nil, @@ -141,12 +141,12 @@ func TestDisabledRoute(t *testing.T) { func TestExcludedRoute(t *testing.T) { // No route configured - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "TestSpecifiedRoute", Namespace: "ns123", }, - //Spec: bsv1alpha1.BackstageSpec{ // //Application: &bsv1alpha1.Application{}, + //Spec: bsv1.BackstageSpec{ // //Application: &bsv1.Application{}, //}, } @@ -165,14 +165,14 @@ func TestExcludedRoute(t *testing.T) { func TestEnabledRoute(t *testing.T) { // Route is enabled by default if configured - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "TestSpecifiedRoute", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - Route: &bsv1alpha1.Route{}, + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + Route: &bsv1.Route{}, }, }, } diff --git a/pkg/model/runtime.go b/pkg/model/runtime.go index 671288ed..cdf26b49 100644 --- a/pkg/model/runtime.go +++ b/pkg/model/runtime.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" ) @@ -37,11 +37,7 @@ import ( const BackstageAppLabel = "rhdh.redhat.com/app" // Backstage configuration scaffolding with empty BackstageObjects. 
-// There are all possible objects for configuration, can be: -// Mandatory - Backstage Deployment (Pod), Service -// Optional - mostly (but not only) Backstage Pod configuration objects (AppConfig, ExtraConfig) -// ForLocalDatabase - mandatory if EnabledLocalDb, ignored otherwise -// ForOpenshift - if configured, used for Openshift deployment, ignored otherwise +// There are all possible objects for configuration var runtimeConfig []ObjectConfig // BackstageModel represents internal object model @@ -61,8 +57,6 @@ type BackstageModel struct { RuntimeObjects []RuntimeObject ExternalConfig ExternalConfig - - //appConfigs []SpecifiedConfigMap } type SpecifiedConfigMap struct { @@ -70,14 +64,6 @@ type SpecifiedConfigMap struct { Key string } -type ExternalConfig struct { - RawConfig map[string]string - AppConfigs map[string]corev1.ConfigMap - ExtraFileConfigMaps map[string]corev1.ConfigMap - ExtraEnvConfigMaps map[string]corev1.ConfigMap - DynamicPlugins corev1.ConfigMap -} - func (m *BackstageModel) setRuntimeObject(object RuntimeObject) { for i, obj := range m.RuntimeObjects { if reflect.TypeOf(obj) == reflect.TypeOf(object) { @@ -119,7 +105,7 @@ func registerConfig(key string, factory ObjectFactory) { } // InitObjects performs a main loop for configuring and making the array of objects to reconcile -func InitObjects(ctx context.Context, backstage bsv1alpha1.Backstage, externalConfig ExternalConfig, ownsRuntime bool, isOpenshift bool, scheme *runtime.Scheme) (*BackstageModel, error) { +func InitObjects(ctx context.Context, backstage bsv1.Backstage, externalConfig ExternalConfig, ownsRuntime bool, isOpenshift bool, scheme *runtime.Scheme) (*BackstageModel, error) { // 3 phases of Backstage configuration: // 1- load from Operator defaults, modify metadata (labels, selectors..) 
and namespace as needed @@ -160,7 +146,7 @@ func InitObjects(ctx context.Context, backstage bsv1alpha1.Backstage, externalCo // apply spec and add the object to the model and list if added, err := backstageObject.addToModel(model, backstage); err != nil { - return nil, fmt.Errorf("failed to initialize %s reason: %s", backstageObject, err) + return nil, fmt.Errorf("failed to initialize backstage, reason: %s", err) } else if added { setMetaInfo(backstageObject, backstage, ownsRuntime, scheme) } @@ -181,7 +167,7 @@ func InitObjects(ctx context.Context, backstage bsv1alpha1.Backstage, externalCo } // Every RuntimeObject.setMetaInfo should as minimum call this -func setMetaInfo(modelObject RuntimeObject, backstage bsv1alpha1.Backstage, ownsRuntime bool, scheme *runtime.Scheme) { +func setMetaInfo(modelObject RuntimeObject, backstage bsv1.Backstage, ownsRuntime bool, scheme *runtime.Scheme) { modelObject.setMetaInfo(backstage.Name) modelObject.Object().SetNamespace(backstage.Namespace) modelObject.Object().SetLabels(utils.SetKubeLabels(modelObject.Object().GetLabels(), backstage.Name)) diff --git a/pkg/model/runtime_test.go b/pkg/model/runtime_test.go index 5d41d301..f69ac680 100644 --- a/pkg/model/runtime_test.go +++ b/pkg/model/runtime_test.go @@ -17,30 +17,38 @@ package model import ( "context" "fmt" + "testing" "k8s.io/utils/ptr" - "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/stretchr/testify/assert" ) -//const backstageContainerName = "backstage-backend" +func TestIfEmptyObjectsContainTypeinfo(t *testing.T) { + for _, cfg := range runtimeConfig { + obj := cfg.ObjectFactory.newBackstageObject() + assert.NotNil(t, obj.EmptyObject()) + // TODO uncomment when Kind is available + 
//assert.NotEmpty(t, obj.EmptyObject().GetObjectKind().GroupVersionKind().Kind) + } +} // NOTE: to make it work locally env var LOCALBIN should point to the directory where default-config folder located func TestInitDefaultDeploy(t *testing.T) { - bs := v1alpha1.Backstage{ + bs := v1alpha2.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: v1alpha1.BackstageSpec{ - Database: &v1alpha1.Database{ + Spec: v1alpha2.BackstageSpec{ + Database: &v1alpha2.Database{ EnableLocalDb: ptr.To(false), }, }, @@ -70,13 +78,13 @@ func TestInitDefaultDeploy(t *testing.T) { func TestIfEmptyObjectIsValid(t *testing.T) { - bs := bsv1alpha1.Backstage{ + bs := bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Database: &bsv1alpha1.Database{ + Spec: bsv1.BackstageSpec{ + Database: &bsv1.Database{ EnableLocalDb: ptr.To(false), }, }, @@ -95,13 +103,13 @@ func TestIfEmptyObjectIsValid(t *testing.T) { func TestAddToModel(t *testing.T) { - bs := v1alpha1.Backstage{ + bs := v1alpha2.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: v1alpha1.BackstageSpec{ - Database: &v1alpha1.Database{ + Spec: v1alpha2.BackstageSpec{ + Database: &v1alpha2.Database{ EnableLocalDb: ptr.To(false), }, }, diff --git a/pkg/model/secretenvs.go b/pkg/model/secretenvs.go index a6277351..86ed9660 100644 --- a/pkg/model/secretenvs.go +++ b/pkg/model/secretenvs.go @@ -15,7 +15,7 @@ package model import ( - "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" appsv1 "k8s.io/api/apps/v1" @@ -44,7 +44,7 @@ func (p *SecretEnvs) Object() client.Object { return p.Secret } -func addSecretEnvs(spec v1alpha1.BackstageSpec, deployment *appsv1.Deployment) error { +func addSecretEnvs(spec v1alpha2.BackstageSpec, deployment *appsv1.Deployment) error { if 
spec.Application == nil || spec.Application.ExtraEnvs == nil || spec.Application.ExtraEnvs.Secrets == nil { return nil @@ -73,7 +73,7 @@ func (p *SecretEnvs) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (p *SecretEnvs) addToModel(model *BackstageModel, _ v1alpha1.Backstage) (bool, error) { +func (p *SecretEnvs) addToModel(model *BackstageModel, _ v1alpha2.Backstage) (bool, error) { if p.Secret != nil { model.setRuntimeObject(p) return true, nil @@ -82,7 +82,7 @@ func (p *SecretEnvs) addToModel(model *BackstageModel, _ v1alpha1.Backstage) (bo } // implementation of RuntimeObject interface -func (p *SecretEnvs) validate(_ *BackstageModel, _ v1alpha1.Backstage) error { +func (p *SecretEnvs) validate(_ *BackstageModel, _ v1alpha2.Backstage) error { return nil } diff --git a/pkg/model/secretfiles.go b/pkg/model/secretfiles.go index 17430ab3..93dec9e1 100644 --- a/pkg/model/secretfiles.go +++ b/pkg/model/secretfiles.go @@ -20,7 +20,7 @@ import ( appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 "k8s.io/api/core/v1" @@ -43,7 +43,7 @@ func init() { registerConfig("secret-files.yaml", SecretFilesFactory{}) } -func addSecretFiles(spec v1alpha1.BackstageSpec, deployment *appsv1.Deployment) error { +func addSecretFiles(spec v1alpha2.BackstageSpec, deployment *appsv1.Deployment) error { if spec.Application == nil || spec.Application.ExtraFiles == nil || spec.Application.ExtraFiles.Secrets == nil { return nil @@ -90,7 +90,7 @@ func (p *SecretFiles) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (p *SecretFiles) addToModel(model *BackstageModel, _ v1alpha1.Backstage) (bool, error) { +func (p *SecretFiles) addToModel(model *BackstageModel, _ v1alpha2.Backstage) (bool, error) { if p.Secret != 
nil { model.setRuntimeObject(p) return true, nil @@ -99,7 +99,7 @@ func (p *SecretFiles) addToModel(model *BackstageModel, _ v1alpha1.Backstage) (b } // implementation of RuntimeObject interface -func (p *SecretFiles) validate(_ *BackstageModel, _ v1alpha1.Backstage) error { +func (p *SecretFiles) validate(_ *BackstageModel, _ v1alpha2.Backstage) error { return nil } diff --git a/pkg/model/secretfiles_test.go b/pkg/model/secretfiles_test.go index c463d470..98718707 100644 --- a/pkg/model/secretfiles_test.go +++ b/pkg/model/secretfiles_test.go @@ -16,15 +16,14 @@ package model import ( "context" + "testing" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" - "github.com/stretchr/testify/assert" ) @@ -45,16 +44,16 @@ var ( // StringData: map[string]string{"conf2.yaml": ""}, //} - secretFilesTestBackstage = bsv1alpha1.Backstage{ + secretFilesTestBackstage = bsv1.Backstage{ ObjectMeta: metav1.ObjectMeta{ Name: "bs", Namespace: "ns123", }, - Spec: bsv1alpha1.BackstageSpec{ - Application: &bsv1alpha1.Application{ - ExtraFiles: &bsv1alpha1.ExtraFiles{ + Spec: bsv1.BackstageSpec{ + Application: &bsv1.Application{ + ExtraFiles: &bsv1.ExtraFiles{ MountPath: "/my/path", - Secrets: []bsv1alpha1.ObjectKeyRef{}, + Secrets: []bsv1.ObjectKeyRef{}, }, }, }, @@ -83,8 +82,10 @@ func TestSpecifiedSecretFiles(t *testing.T) { bs := *secretFilesTestBackstage.DeepCopy() sf := &bs.Spec.Application.ExtraFiles.Secrets - *sf = append(*sf, bsv1alpha1.ObjectKeyRef{Name: "secret1", Key: "conf.yaml"}) - *sf = append(*sf, bsv1alpha1.ObjectKeyRef{Name: "secret2", Key: "conf.yaml"}) + *sf = append(*sf, bsv1.ObjectKeyRef{Name: "secret1", Key: "conf.yaml"}) + *sf = append(*sf, bsv1.ObjectKeyRef{Name: "secret2", Key: "conf.yaml"}) + // https://issues.redhat.com/browse/RHIDP-2246 - mounting 
secret/CM with dot in the name + *sf = append(*sf, bsv1.ObjectKeyRef{Name: "secret.dot", Key: "conf3.yaml"}) testObj := createBackstageTest(bs).withDefaultConfig(true) @@ -96,11 +97,13 @@ func TestSpecifiedSecretFiles(t *testing.T) { deployment := model.backstageDeployment assert.NotNil(t, deployment) - assert.Equal(t, 2, len(deployment.deployment.Spec.Template.Spec.Containers[0].VolumeMounts)) + assert.Equal(t, 3, len(deployment.deployment.Spec.Template.Spec.Containers[0].VolumeMounts)) assert.Equal(t, 0, len(deployment.deployment.Spec.Template.Spec.Containers[0].Args)) - assert.Equal(t, 2, len(deployment.deployment.Spec.Template.Spec.Volumes)) + assert.Equal(t, 3, len(deployment.deployment.Spec.Template.Spec.Volumes)) assert.Equal(t, utils.GenerateVolumeNameFromCmOrSecret("secret1"), deployment.podSpec().Volumes[0].Name) + assert.Equal(t, utils.GenerateVolumeNameFromCmOrSecret("secret2"), deployment.podSpec().Volumes[1].Name) + assert.Equal(t, utils.GenerateVolumeNameFromCmOrSecret("secret.dot"), deployment.podSpec().Volumes[2].Name) } @@ -108,7 +111,7 @@ func TestDefaultAndSpecifiedSecretFiles(t *testing.T) { bs := *secretFilesTestBackstage.DeepCopy() sf := &bs.Spec.Application.ExtraFiles.Secrets - *sf = append(*sf, bsv1alpha1.ObjectKeyRef{Name: "secret1", Key: "conf.yaml"}) + *sf = append(*sf, bsv1.ObjectKeyRef{Name: "secret1", Key: "conf.yaml"}) testObj := createBackstageTest(bs).withDefaultConfig(true).addToDefaultConfig("secret-files.yaml", "raw-secret-files.yaml") model, err := InitObjects(context.TODO(), bs, testObj.externalConfig, true, false, testObj.scheme) diff --git a/pkg/model/service.go b/pkg/model/service.go index da1b3a1b..5f03e7f0 100644 --- a/pkg/model/service.go +++ b/pkg/model/service.go @@ -17,7 +17,7 @@ package model import ( "fmt" - bsv1alpha1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha1" + bsv1 "redhat-developer/red-hat-developer-hub-operator/api/v1alpha2" "redhat-developer/red-hat-developer-hub-operator/pkg/utils" corev1 
"k8s.io/api/core/v1" @@ -55,7 +55,7 @@ func (b *BackstageService) setObject(obj client.Object) { } // implementation of RuntimeObject interface -func (b *BackstageService) addToModel(model *BackstageModel, _ bsv1alpha1.Backstage) (bool, error) { +func (b *BackstageService) addToModel(model *BackstageModel, _ bsv1.Backstage) (bool, error) { if b.service == nil { return false, fmt.Errorf("Backstage Service is not initialized, make sure there is service.yaml in default or raw configuration") } @@ -72,11 +72,11 @@ func (b *BackstageService) EmptyObject() client.Object { } // implementation of RuntimeObject interface -func (b *BackstageService) validate(_ *BackstageModel, _ bsv1alpha1.Backstage) error { +func (b *BackstageService) validate(_ *BackstageModel, _ bsv1.Backstage) error { return nil } func (b *BackstageService) setMetaInfo(backstageName string) { b.service.SetName(ServiceName(backstageName)) - utils.GenerateLabel(&b.service.Spec.Selector, BackstageAppLabel, fmt.Sprintf("backstage-%s", backstageName)) + utils.GenerateLabel(&b.service.Spec.Selector, BackstageAppLabel, utils.BackstageAppLabelValue(backstageName)) } diff --git a/pkg/model/testdata/raw-app-config.yaml b/pkg/model/testdata/raw-app-config.yaml index 65e17c70..ec696ac5 100644 --- a/pkg/model/testdata/raw-app-config.yaml +++ b/pkg/model/testdata/raw-app-config.yaml @@ -10,6 +10,9 @@ data: password: ${POSTGRES_PASSWORD} user: ${POSTGRES_USER} auth: - keys: - # This is a default value, which you should change by providing your own app-config - - secret: "pl4s3Ch4ng3M3" \ No newline at end of file + externalAccess: + - type: legacy + options: + subject: legacy-default-config + # This is a default value, which you should change by providing your own app-config + secret: "pl4s3Ch4ng3M3" diff --git a/pkg/utils/pod-mutator.go b/pkg/utils/pod-mutator.go index d7c9e606..c6c62e4d 100644 --- a/pkg/utils/pod-mutator.go +++ b/pkg/utils/pod-mutator.go @@ -39,7 +39,7 @@ type PodMutator struct { // podSpec - PodSpec 
to add Volume to // container - container to add VolumeMount(s) to // kind - kind of source, can be ConfigMap or Secret -// object name - name of source object +// objectName - name of source object // mountPath - mount path, default one or as it specified in BackstageCR.spec.Application.AppConfig|ExtraFiles // fileName - file name which fits one of the object's key, otherwise error will be returned. // data - key:value pairs from the object. should be specified if fileName specified @@ -77,6 +77,10 @@ func MountFilesFrom(podSpec *corev1.PodSpec, container *corev1.Container, kind O } +// AddEnvVarsFrom adds environment variable to specified container +// kind - kind of source, can be ConfigMap or Secret +// objectName - name of source object +// varName - name of env variable func AddEnvVarsFrom(container *corev1.Container, kind ObjectKind, objectName, varName string) { if varName == "" { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 7aa3534c..585c342d 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -21,6 +21,8 @@ import ( "fmt" "os" "path/filepath" + "regexp" + "strings" "k8s.io/client-go/discovery" ctrl "sigs.k8s.io/controller-runtime" @@ -28,6 +30,8 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" ) +const maxK8sResourceNameLength = 63 + func SetKubeLabels(labels map[string]string, backstageName string) map[string]string { if labels == nil { labels = map[string]string{} @@ -48,13 +52,25 @@ func GenerateLabel(labels *map[string]string, name string, value string) { // GenerateRuntimeObjectName generates name using BackstageCR name and objectType which is ConfigObject Key without '.yaml' (like 'deployment') func GenerateRuntimeObjectName(backstageCRName string, objectType string) string { - return fmt.Sprintf("%s-%s", backstageCRName, objectType) + return fmt.Sprintf("%s-%s", objectType, backstageCRName) } -// GenerateVolumeNameFromCmOrSecret generates volume name for mounting ConfigMap or Secret +// GenerateVolumeNameFromCmOrSecret 
generates volume name for mounting ConfigMap or Secret. +// +// It does so by converting the input name to an RFC 1123-compliant value, which is required by Kubernetes, +// even if the input CM/Secret name can be a valid DNS subdomain. +// +// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names func GenerateVolumeNameFromCmOrSecret(cmOrSecretName string) string { - //return fmt.Sprintf("vol-%s", cmOrSecretName) - return cmOrSecretName + return ToRFC1123Label(cmOrSecretName) +} + +func BackstageAppLabelValue(backstageName string) string { + return fmt.Sprintf("backstage-%s", backstageName) +} + +func BackstageDbAppLabelValue(backstageName string) string { + return fmt.Sprintf("backstage-psql-%s", backstageName) } func ReadYaml(manifest []byte, object interface{}) error { @@ -112,3 +128,32 @@ func IsOpenshift() (bool, error) { return false, nil } + +// ToRFC1123Label converts the given string into a valid Kubernetes label name (RFC 1123-compliant). +// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ for more details about the requirements. +// It will replace any invalid characters with a dash and drop any leading or trailing dashes. 
+func ToRFC1123Label(str string) string { + const dash = "-" + + name := strings.ToLower(str) + + // Replace all invalid characters with a dash + re := regexp.MustCompile(`[^a-z0-9-]`) + name = re.ReplaceAllString(name, dash) + + // Replace consecutive dashes with a single dash + reConsecutiveDashes := regexp.MustCompile(`-+`) + name = reConsecutiveDashes.ReplaceAllString(name, dash) + + // Truncate to maxK8sResourceNameLength characters if necessary + if len(name) > maxK8sResourceNameLength { + name = name[:maxK8sResourceNameLength] + } + + // Continue trimming leading and trailing dashes if necessary + for strings.HasPrefix(name, dash) || strings.HasSuffix(name, dash) { + name = strings.Trim(name, dash) + } + + return name +} diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go new file mode 100644 index 00000000..bd5710f7 --- /dev/null +++ b/pkg/utils/utils_test.go @@ -0,0 +1,52 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "testing" +) + +func TestToRFC1123Label(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + // The inputs below are all valid names for K8s ConfigMaps or Secrets. 
+ + { + name: "should replace invalid characters with a dash", + in: "kube-root-ca.crt", + want: "kube-root-ca-crt", + }, + { + name: "all-numeric string should remain unchanged", + in: "123456789", + want: "123456789", + }, + { + name: "should truncate up to the maximum length and remove leading and trailing dashes", + in: "ppxkgq.df-yyatvyrgjtwivunibicne-bvyyotvonbrtfv-awylmrez.ksvqjw-z.xpgdi", /* 70 characters */ + want: "ppxkgq-df-yyatvyrgjtwivunibicne-bvyyotvonbrtfv-awylmrez-ksvqjw", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ToRFC1123Label(tt.in); got != tt.want { + t.Errorf("ToRFC1123Label() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go index 042ae386..4ce06edf 100644 --- a/tests/e2e/e2e_suite_test.go +++ b/tests/e2e/e2e_suite_test.go @@ -184,31 +184,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { } By("validating that the controller-manager pod is running as expected") - verifyControllerUp := func(g Gomega) { - // Get pod name - cmd := exec.Command(helper.GetPlatformTool(), "get", - "pods", "-l", managerPodLabel, - "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}"+ - "{{ \"\\n\" }}{{ end }}{{ end }}", - "-n", _namespace, - ) - podOutput, err := helper.Run(cmd) - g.Expect(err).ShouldNot(HaveOccurred()) - podNames := helper.GetNonEmptyLines(string(podOutput)) - g.Expect(podNames).Should(HaveLen(1), fmt.Sprintf("expected 1 controller pods running, but got %d", len(podNames))) - controllerPodName := podNames[0] - g.Expect(controllerPodName).ShouldNot(BeEmpty()) - - // Validate pod status - cmd = exec.Command(helper.GetPlatformTool(), "get", - "pods", controllerPodName, "-o", "jsonpath={.status.phase}", - "-n", _namespace, - ) - status, err := helper.Run(cmd) - g.Expect(err).ShouldNot(HaveOccurred()) - g.Expect(string(status)).Should(Equal("Running"), fmt.Sprintf("controller pod in %s status", 
status)) - } - EventuallyWithOffset(1, verifyControllerUp, 5*time.Minute, time.Second).Should(Succeed()) + EventuallyWithOffset(1, verifyControllerUp, 5*time.Minute, time.Second).WithArguments(managerPodLabel).Should(Succeed()) return nil }, func(_ []byte) { @@ -217,8 +193,46 @@ var _ = SynchronizedBeforeSuite(func() []byte { var _ = SynchronizedAfterSuite(func() { //runs on *all* processes -}, func() { - //runs *only* on process #1 +}, + // the function below *only* on process #1 + uninstallOperator, +) + +func verifyControllerUp(g Gomega, managerPodLabel string) { + // Get pod name + cmd := exec.Command(helper.GetPlatformTool(), "get", + "pods", "-l", managerPodLabel, + "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", _namespace, + ) + podOutput, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + podNames := helper.GetNonEmptyLines(string(podOutput)) + g.Expect(podNames).Should(HaveLen(1), fmt.Sprintf("expected 1 controller pods running, but got %d", len(podNames))) + controllerPodName := podNames[0] + g.Expect(controllerPodName).ShouldNot(BeEmpty()) + + // Validate pod status + cmd = exec.Command(helper.GetPlatformTool(), "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", _namespace, + ) + status, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(string(status)).Should(Equal("Running"), fmt.Sprintf("controller pod in %s status", status)) +} + +func getPodLogs(ns string, label string) string { + cmd := exec.Command(helper.GetPlatformTool(), "logs", + "-l", label, + "-n", ns, + ) + output, _ := helper.Run(cmd) + return string(output) +} + +func uninstallOperator() { switch testMode { case rhdhLatestTestMode, rhdhNextTestMode, rhdhAirgapTestMode: uninstallRhdhOperator(testMode == rhdhAirgapTestMode) @@ -226,7 +240,7 @@ var _ = SynchronizedAfterSuite(func() { uninstallOperatorWithMakeUndeploy(testMode == 
olmDeployTestMode) } helper.DeleteNamespace(_namespace, true) -}) +} func uninstallRhdhOperator(withAirgap bool) { cmd := exec.Command(helper.GetPlatformTool(), "delete", "subscription", "rhdh", "-n", _namespace, "--ignore-not-found=true") @@ -253,6 +267,5 @@ func uninstallOperatorWithMakeUndeploy(withOlm bool) { undeployCmd += "-olm" } cmd := exec.Command("make", undeployCmd) - _, err := helper.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) + _, _ = helper.Run(cmd) } diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index b2db9d4f..c587484b 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -15,7 +15,11 @@ package e2e import ( + "crypto/tls" + "encoding/json" "fmt" + "io" + "net/http" "os/exec" "path/filepath" "strconv" @@ -78,9 +82,44 @@ var _ = Describe("Backstage Operator E2E", func() { crName: "bs-app-config", additionalApiEndpointTests: []helper.ApiEndpointTest{ { - Endpoint: "/api/dynamic-plugins-info/loaded-plugins", + Endpoint: "/api/dynamic-plugins-info/loaded-plugins", + BearerTokenRetrievalFn: func(baseUrl string) (string, error) { // Authenticated endpoint that does not accept service tokens + url := fmt.Sprintf("%s/api/auth/guest/refresh", baseUrl) + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // #nosec G402 -- test code only, not used in production + }, + } + httpClient := &http.Client{Transport: tr} + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", fmt.Errorf("error while building request to GET %q: %w", url, err) + } + req.Header.Add("Accept", "application/json") + resp, err := httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("error while trying to GET %q: %w", url, err) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("error while trying to read response body from 'GET %q': %w", url, err) + } + if resp.StatusCode != 200 { + return "", fmt.Errorf("expected status code 200, but 
got %d in response to 'GET %q', body: %s", resp.StatusCode, url, string(body)) + } + var authResponse helper.BackstageAuthRefreshResponse + err = json.Unmarshal(body, &authResponse) + if err != nil { + return "", fmt.Errorf("error while trying to decode response body from 'GET %q': %w", url, err) + } + return authResponse.BackstageIdentity.Token, nil + }, ExpectedHttpStatusCode: 200, BodyMatcher: SatisfyAll( + ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-quay-dynamic"), + ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-regex-dynamic"), + ContainSubstring("roadiehq-scaffolder-backend-module-utils-dynamic"), ContainSubstring("backstage-plugin-catalog-backend-module-github-dynamic"), ContainSubstring("backstage-plugin-techdocs-backend-dynamic"), ContainSubstring("backstage-plugin-catalog-backend-module-gitlab-dynamic")), @@ -187,7 +226,7 @@ var _ = Describe("Backstage Operator E2E", func() { }) func ensureRouteIsReachable(ns string, crName string, additionalApiEndpointTests []helper.ApiEndpointTest) { - Eventually(helper.VerifyBackstageRoute, time.Minute, time.Second). + Eventually(helper.VerifyBackstageRoute, 5*time.Minute, time.Second). WithArguments(ns, crName, additionalApiEndpointTests). Should(Succeed()) } diff --git a/tests/e2e/e2e_upgrade_test.go b/tests/e2e/e2e_upgrade_test.go new file mode 100644 index 00000000..1fd31d0c --- /dev/null +++ b/tests/e2e/e2e_upgrade_test.go @@ -0,0 +1,132 @@ +// +// Copyright (c) 2023 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "io" + "os/exec" + "path/filepath" + "time" + + "redhat-developer/red-hat-developer-hub-operator/tests/helper" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Operator upgrade with existing instances", func() { + + var ( + projectDir string + ns string + ) + + BeforeEach(func() { + var err error + projectDir, err = helper.GetProjectDir() + Expect(err).ShouldNot(HaveOccurred()) + + ns = fmt.Sprintf("e2e-test-%d-%s", GinkgoParallelProcess(), helper.RandString(5)) + helper.CreateNamespace(ns) + }) + + AfterEach(func() { + helper.DeleteNamespace(ns, false) + }) + + When("Previous version of operator is installed and CR is created", func() { + + const managerPodLabel = "control-plane=controller-manager" + const crName = "my-backstage-app" + + // 0.1.3 is the version of the operator in the 1.1.x branch + var fromDeploymentManifest = filepath.Join(projectDir, "tests", "e2e", "testdata", "backstage-operator-0.1.3.yaml") + + BeforeEach(func() { + if testMode != defaultDeployTestMode { + Skip("testing upgrades currently supported only with the default deployment mode") + } + + // Uninstall the current version of the operator (which was installed in the SynchronizedBeforeSuite), + // because this test needs to start from a previous version, then perform the upgrade. 
+ uninstallOperator() + + cmd := exec.Command(helper.GetPlatformTool(), "apply", "-f", fromDeploymentManifest) + _, err := helper.Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) + EventuallyWithOffset(1, verifyControllerUp, 5*time.Minute, time.Second).WithArguments(managerPodLabel).Should(Succeed()) + + cmd = exec.Command(helper.GetPlatformTool(), "-n", ns, "create", "-f", "-") + stdin, err := cmd.StdinPipe() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + go func() { + defer stdin.Close() + _, _ = io.WriteString(stdin, fmt.Sprintf(` +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: my-backstage-app + namespace: %s +`, ns)) + }() + _, err = helper.Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) + + // Reason is DeployOK in 1.1.x, but was renamed to Deployed in 1.2 + Eventually(helper.VerifyBackstageCRStatus, time.Minute, time.Second).WithArguments(ns, crName, `"reason":"DeployOK"`).Should(Succeed()) + }) + + AfterEach(func() { + uninstallOperator() + + cmd := exec.Command(helper.GetPlatformTool(), "delete", "-f", fromDeploymentManifest, "--ignore-not-found=true") + _, err := helper.Run(cmd) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should successfully reconcile existing CR when upgrading the operator", func() { + By("Upgrading the operator", func() { + installOperatorWithMakeDeploy(false) + EventuallyWithOffset(1, verifyControllerUp, 5*time.Minute, 3*time.Second).WithArguments(managerPodLabel).Should(Succeed()) + }) + + By("checking the status of the existing CR") + Eventually(helper.VerifyBackstageCRStatus, 5*time.Minute, 3*time.Second).WithArguments(ns, crName, `"reason":"Deployed"`). 
+ Should(Succeed(), func() string { + return fmt.Sprintf("=== Operator logs ===\n%s\n", getPodLogs(_namespace, managerPodLabel)) + }) + + By("checking the Backstage operand pod") + crLabel := fmt.Sprintf("rhdh.redhat.com/app=backstage-%s", crName) + Eventually(func(g Gomega) { + // Get pod name + cmd := exec.Command(helper.GetPlatformTool(), "get", + "pods", "-l", crLabel, + "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", ns, + ) + podOutput, err := helper.Run(cmd) + g.Expect(err).ShouldNot(HaveOccurred()) + podNames := helper.GetNonEmptyLines(string(podOutput)) + g.Expect(podNames).Should(HaveLen(1), fmt.Sprintf("expected 1 Backstage operand pod(s) running, but got %d", len(podNames))) + }, 10*time.Minute, 5*time.Second).Should(Succeed(), func() string { + return fmt.Sprintf("=== Operand logs ===\n%s\n", getPodLogs(ns, crLabel)) + }) + }) + }) + +}) diff --git a/tests/e2e/testdata/backstage-operator-0.1.3.yaml b/tests/e2e/testdata/backstage-operator-0.1.3.yaml new file mode 100644 index 00000000..9ffdba40 --- /dev/null +++ b/tests/e2e/testdata/backstage-operator-0.1.3.yaml @@ -0,0 +1,992 @@ +# +# Generated using `make deployment-manifest` against the 1.1.x branch. +# Used to test the upgrade paths of the operator. 
+# +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: backstage-operator + control-plane: controller-manager + name: backstage-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: backstages.rhdh.redhat.com +spec: + group: rhdh.redhat.com + names: + kind: Backstage + listKind: BackstageList + plural: backstages + singular: backstage + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Backstage is the Schema for the backstages API + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: BackstageSpec defines the desired state of Backstage + properties: + application: + description: Configuration for Backstage. Optional. + properties: + appConfig: + description: References to existing app-configs ConfigMap objects, that will be mounted as files in the specified mount path. 
Each element can be a reference to any ConfigMap or Secret, and will be mounted inside the main application container under a specified mount directory. Additionally, each file will be passed as a `--config /mount/path/to/configmap/key` to the main container args in the order of the entries defined in the AppConfigs list. But bear in mind that for a single ConfigMap element containing several filenames, the order in which those files will be appended to the main container args cannot be guaranteed. So if you want to pass multiple app-config files, it is recommended to pass one ConfigMap per app-config file. + properties: + configMaps: + description: List of ConfigMaps storing the app-config files. Will be mounted as files under the MountPath specified. For each item in this array, if a key is not specified, it means that all keys in the ConfigMap will be mounted as files. Otherwise, only the specified key will be mounted as a file. Bear in mind not to put sensitive data in those ConfigMaps. Instead, your app-config content can reference environment variables (which you can set with the ExtraEnvs field) and/or include extra files (see the ExtraFiles field). More details on https://backstage.io/docs/conf/writing/. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps and Secrets. + type: string + required: + - name + type: object + type: array + mountPath: + default: /opt/app-root/src + description: Mount path for all app-config files listed in the ConfigMapRefs field + type: string + type: object + dynamicPluginsConfigMapName: + description: "Reference to an existing ConfigMap for Dynamic Plugins. A new one will be generated with the default config if not set. The ConfigMap object must have an existing key named: 'dynamic-plugins.yaml'." 
+ type: string + extraEnvs: + description: Extra environment variables + properties: + configMaps: + description: List of references to ConfigMaps objects to inject as additional environment variables. For each item in this array, if a key is not specified, it means that all keys in the ConfigMap will be injected as additional environment variables. Otherwise, only the specified key will be injected as an additional environment variable. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps and Secrets. + type: string + required: + - name + type: object + type: array + envs: + description: List of name and value pairs to add as environment variables. + items: + properties: + name: + description: Name of the environment variable + type: string + value: + description: Value of the environment variable + type: string + required: + - name + - value + type: object + type: array + secrets: + description: List of references to Secrets objects to inject as additional environment variables. For each item in this array, if a key is not specified, it means that all keys in the Secret will be injected as additional environment variables. Otherwise, only the specified key will be injected as environment variable. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps and Secrets. + type: string + required: + - name + type: object + type: array + type: object + extraFiles: + description: References to existing Config objects to use as extra config files. They will be mounted as files in the specified mount path. Each element can be a reference to any ConfigMap or Secret. + properties: + configMaps: + description: List of references to ConfigMaps objects mounted as extra files under the MountPath specified. 
For each item in this array, if a key is not specified, it means that all keys in the ConfigMap will be mounted as files. Otherwise, only the specified key will be mounted as a file. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps and Secrets. + type: string + required: + - name + type: object + type: array + mountPath: + default: /opt/app-root/src + description: Mount path for all extra configuration files listed in the Items field + type: string + secrets: + description: List of references to Secrets objects mounted as extra files under the MountPath specified. For each item in this array, a key must be specified that will be mounted as a file. + items: + properties: + key: + description: Key in the object + type: string + name: + description: Name of the object We support only ConfigMaps and Secrets. + type: string + required: + - name + type: object + type: array + type: object + image: + description: Custom image to use in all containers (including Init Containers). It is your responsibility to make sure the image is from trusted sources and has been validated for security compliance + type: string + imagePullSecrets: + description: Image Pull Secrets to use in all containers (including Init Containers) + items: + type: string + type: array + replicas: + default: 1 + description: Number of desired replicas to set in the Backstage Deployment. Defaults to 1. + format: int32 + type: integer + route: + description: Route configuration. Used for OpenShift only. + properties: + enabled: + default: true + description: Control the creation of a Route on OpenShift. + type: boolean + host: + description: Host is an alias/DNS that points to the service. Optional. Ignored if Enabled is false. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. 
+ maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + subdomain: + description: "Subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). Ignored if Enabled is false. Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`." + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + tls: + description: The tls field provides the ability to configure certificates for the route. Ignored if Enabled is false. + properties: + caCertificate: + description: caCertificate provides the cert authority certificate contents + type: string + certificate: + description: certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. + type: string + externalCertificateSecretName: + description: ExternalCertificateSecretName provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set. + type: string + key: + description: key provides key file contents + type: string + type: object + type: object + type: object + database: + description: Configuration for database access. Optional. + properties: + authSecretName: + description: 'Name of the secret for database authentication. Optional. For a local database deployment (EnableLocalDb=true), a secret will be auto generated if it does not exist. The secret shall include information used for the database access. 
An example for PostgreSQL DB access: "POSTGRES_PASSWORD": "rl4s3Fh4ng3M4" "POSTGRES_PORT": "5432" "POSTGRES_USER": "postgres" "POSTGRESQL_ADMIN_PASSWORD": "rl4s3Fh4ng3M4" "POSTGRES_HOST": "backstage-psql-bs1" # For local database, set to "backstage-psql-".' + type: string + enableLocalDb: + default: true + description: Control the creation of a local PostgreSQL DB. Set to false if using for example an external Database for Backstage. + type: boolean + type: object + rawRuntimeConfig: + description: Raw Runtime Objects configuration. For Advanced scenarios. + properties: + backstageConfig: + description: Name of ConfigMap containing Backstage runtime objects configuration + type: string + localDbConfig: + description: Name of ConfigMap containing LocalDb (PostgreSQL) runtime objects configuration + type: string + type: object + type: object + status: + description: BackstageStatus defines the observed state of Backstage + properties: + conditions: + description: Conditions is the list of conditions describing the state of the runtime + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: backstage-operator + name: backstage-controller-manager + namespace: backstage-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: role + app.kubernetes.io/part-of: backstage-operator + name: backstage-leader-election-role + namespace: backstage-system +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: backstage-manager-role +rules: + - apiGroups: + - "" + resources: + - configmaps + - services + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - get + - list + - watch + - apiGroups: + - "" + 
resources: + - secrets + verbs: + - create + - delete + - apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - rhdh.redhat.com + resources: + - backstages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - rhdh.redhat.com + resources: + - backstages/finalizers + verbs: + - update + - apiGroups: + - rhdh.redhat.com + resources: + - backstages/status + verbs: + - get + - patch + - update + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - create + - delete + - get + - list + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: backstage-operator + name: backstage-metrics-reader +rules: + - nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: backstage-operator + name: backstage-proxy-role +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: 
backstage-operator + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: rolebinding + app.kubernetes.io/part-of: backstage-operator + name: backstage-leader-election-rolebinding + namespace: backstage-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: backstage-leader-election-role +subjects: + - kind: ServiceAccount + name: backstage-controller-manager + namespace: backstage-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: backstage-operator + name: backstage-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: backstage-manager-role +subjects: + - kind: ServiceAccount + name: backstage-controller-manager + namespace: backstage-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: backstage-operator + name: backstage-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: backstage-proxy-role +subjects: + - kind: ServiceAccount + name: backstage-controller-manager + namespace: backstage-system +--- +apiVersion: v1 +data: + backend-auth-configmap.yaml: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: # placeholder for '-backend-auth' + data: + "app-config.backend-auth.default.yaml": | + backend: + externalAccess: + - type: legacy + options: + subject: legacy-default-config + # 
This is a default value, which you should change by providing your own app-config + secret: "pl4s3Ch4ng3M3" + db-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: # placeholder for 'backstage-psql-secret-' + stringData: + "POSTGRES_PASSWORD": "rl4s3Fh4ng3M4" # default value, change to your own value + "POSTGRES_PORT": "5432" + "POSTGRES_USER": "postgres" + "POSTGRESQL_ADMIN_PASSWORD": "rl4s3Fh4ng3M4" # default value, change to your own value + "POSTGRES_HOST": "" # set to your Postgres DB host. If the local DB is deployed, set to 'backstage-psql-' + db-service-hl.yaml: | + apiVersion: v1 + kind: Service + metadata: + name: backstage-psql-cr1-hl # placeholder for 'backstage-psql--hl' + spec: + selector: + rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' + clusterIP: None + ports: + - port: 5432 + db-service.yaml: | + apiVersion: v1 + kind: Service + metadata: + name: backstage-psql # placeholder for 'backstage-psql-'. NOTE: For the time being it is static and linked to Secret -> postgres-secrets -> POSTGRES_HOST + spec: + selector: + rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' + ports: + - port: 5432 + db-statefulset.yaml: | + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: backstage-psql-cr1 # placeholder for 'backstage-psql-' + spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' + serviceName: backstage-psql-cr1-hl # placeholder for 'backstage-psql--hl' + template: + metadata: + labels: + rhdh.redhat.com/app: backstage-psql-cr1 # placeholder for 'backstage-psql-' + name: backstage-db-cr1 # placeholder for 'backstage-psql-' + spec: + automountServiceAccountToken: false + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## The optional .spec.persistentVolumeClaimRetentionPolicy field controls if and how PVCs are deleted during the lifecycle of a 
StatefulSet. + ## You must enable the StatefulSetAutoDeletePVC feature gate on the API server and the controller manager to use this field. + # persistentVolumeClaimRetentionPolicy: + # whenDeleted: Retain + # whenScaled: Retain + containers: + - env: + - name: POSTGRESQL_PORT_NUMBER + value: "5432" + - name: POSTGRESQL_VOLUME_DIR + value: /var/lib/pgsql/data + - name: PGDATA + value: /var/lib/pgsql/data/userdata + envFrom: + - secretRef: + name: # will be replaced with 'backstage-psql-secrets-' + # image will be replaced by the value of the `RELATED_IMAGE_postgresql` env var, if set + image: quay.io/fedora/postgresql-15:latest + imagePullPolicy: IfNotPresent + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exec pg_isready -U ${POSTGRES_USER} -h 127.0.0.1 -p 5432 + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: postgresql + ports: + - containerPort: 5432 + name: tcp-postgresql + protocol: TCP + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + - | + exec pg_isready -U ${POSTGRES_USER} -h 127.0.0.1 -p 5432 + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 250m + memory: 1024Mi + ephemeral-storage: 20Mi + volumeMounts: + - mountPath: /dev/shm + name: dshm + - mountPath: /var/lib/pgsql/data + name: data + restartPolicy: Always + securityContext: {} + serviceAccount: default + serviceAccountName: default + volumes: + - emptyDir: + medium: Memory + name: dshm + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + 
deployment.yaml: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: # placeholder for 'backstage-' + spec: + replicas: 1 + selector: + matchLabels: + rhdh.redhat.com/app: # placeholder for 'backstage-' + template: + metadata: + labels: + rhdh.redhat.com/app: # placeholder for 'backstage-' + spec: + automountServiceAccountToken: false + volumes: + - ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + name: dynamic-plugins-root + - name: dynamic-plugins-npmrc + secret: + defaultMode: 420 + optional: true + secretName: dynamic-plugins-npmrc + + initContainers: + - command: + - ./install-dynamic-plugins.sh + - /dynamic-plugins-root + env: + - name: NPM_CONFIG_USERCONFIG + value: /opt/app-root/src/.npmrc.dynamic-plugins + # image will be replaced by the value of the `RELATED_IMAGE_backstage` env var, if set + image: quay.io/janus-idp/backstage-showcase:next + imagePullPolicy: IfNotPresent + name: install-dynamic-plugins + volumeMounts: + - mountPath: /dynamic-plugins-root + name: dynamic-plugins-root + - mountPath: /opt/app-root/src/.npmrc.dynamic-plugins + name: dynamic-plugins-npmrc + readOnly: true + subPath: .npmrc + workingDir: /opt/app-root/src + resources: + limits: + cpu: 1000m + memory: 2.5Gi + ephemeral-storage: 5Gi + requests: + cpu: 125m + memory: 128Mi + containers: + - name: backstage-backend + # image will be replaced by the value of the `RELATED_IMAGE_backstage` env var, if set + image: quay.io/janus-idp/backstage-showcase:next + imagePullPolicy: IfNotPresent + args: + - "--config" + - "dynamic-plugins-root/app-config.dynamic-plugins.yaml" + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthcheck + port: 7007 + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 2 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthcheck + port: 7007 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + 
successThreshold: 1 + timeoutSeconds: 2 + ports: + - name: backend + containerPort: 7007 + env: + - name: APP_CONFIG_backend_listen_port + value: "7007" + envFrom: + - secretRef: + name: # will be replaced with 'backstage-psql-secrets-' + # - secretRef: + # name: backstage-secrets + volumeMounts: + - mountPath: /opt/app-root/src/dynamic-plugins-root + name: dynamic-plugins-root + resources: + limits: + cpu: 1000m + memory: 2.5Gi + ephemeral-storage: 5Gi + requests: + cpu: 125m + memory: 128Mi + dynamic-plugins-configmap.yaml: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: # placeholder for '-dynamic-plugins' + data: + "dynamic-plugins.yaml": | + includes: + - dynamic-plugins.default.yaml + plugins: [] + route.yaml: |- + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: # placeholder for 'backstage-' + spec: + port: + targetPort: http-backend + path: / + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + to: + kind: Service + name: # placeholder for 'backstage-' + service.yaml: |- + apiVersion: v1 + kind: Service + metadata: + name: # placeholder for 'backstage-' + spec: + type: ClusterIP + selector: + rhdh.redhat.com/app: # placeholder for 'backstage-' + ports: + - name: http-backend + port: 80 + targetPort: backend +kind: ConfigMap +metadata: + name: backstage-default-config + namespace: backstage-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: service + app.kubernetes.io/part-of: backstage-operator + control-plane: controller-manager + name: backstage-controller-manager-metrics-service + namespace: backstage-system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: 
Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: backstage-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: backstage-operator + control-plane: controller-manager + name: backstage-controller-manager + namespace: backstage-system +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + automountServiceAccountToken: true + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + command: + - /manager + env: + - name: RELATED_IMAGE_postgresql + value: quay.io/fedora/postgresql-15:latest + - name: RELATED_IMAGE_backstage + value: quay.io/janus-idp/backstage-showcase:next + # TODO(asoro): Default image is 'quay.io/janus-idp/operator:0.1.3' on 1.1.x, + # but replaced by the one from RHDH, because the Janus-IDP image expires after 14d if not updated. 
+ image: quay.io/rhdh/rhdh-rhel9-operator:1.1 + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + ephemeral-storage: 20Mi + memory: 1024Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + volumeMounts: + - mountPath: /default-config + name: default-config + securityContext: + runAsNonRoot: true + serviceAccountName: backstage-controller-manager + terminationGracePeriodSeconds: 10 + volumes: + - configMap: + name: backstage-default-config + name: default-config diff --git a/tests/helper/helper_backstage.go b/tests/helper/helper_backstage.go index 94532e11..55d65bcf 100644 --- a/tests/helper/helper_backstage.go +++ b/tests/helper/helper_backstage.go @@ -20,20 +20,48 @@ import ( "io" "net/http" "os/exec" - "redhat-developer/red-hat-developer-hub-operator/pkg/model" "strings" + "redhat-developer/red-hat-developer-hub-operator/pkg/model" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" ) type ApiEndpointTest struct { + BearerTokenRetrievalFn func(baseUrl string) (string, error) Endpoint string ExpectedHttpStatusCode int BodyMatcher types.GomegaMatcher } +// BackstageAuthRefreshResponse is the struct of the response returned by the '/api/auth/:user/refresh' API endpoint. +// +// Example: +// +// { +// "backstageIdentity": { +// "expiresInSeconds": 3600, +// "identity": { +// "ownershipEntityRefs": [ +// "user:development/guest" +// ], +// "type": "user", +// "userEntityRef": "user:development/guest" +// }, +// "token": "eyJ0..." 
+// }, +// "profile": {} +// } +type BackstageAuthRefreshResponse struct { + BackstageIdentity BackstageIdentity `json:"backstageIdentity,omitempty"` +} + +type BackstageIdentity struct { + Token string `json:"token,omitempty"` +} + func VerifyBackstagePodStatus(g Gomega, ns string, crName string, expectedStatus string) { cmd := exec.Command("kubectl", "get", "pods", "-l", "rhdh.redhat.com/app=backstage-"+crName, @@ -111,21 +139,13 @@ func GetBackstageRouteHost(ns string, crName string) (string, error) { return fmt.Sprintf("%s.%s", subDomain, ingressDomain), err } +// unauthenticated endpoints var defaultApiEndpointTests = []ApiEndpointTest{ { Endpoint: "/", ExpectedHttpStatusCode: 200, BodyMatcher: ContainSubstring("You need to enable JavaScript to run this app"), }, - { - Endpoint: "/api/dynamic-plugins-info/loaded-plugins", - ExpectedHttpStatusCode: 200, - BodyMatcher: SatisfyAll( - ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-quay-dynamic"), - ContainSubstring("@janus-idp/backstage-scaffolder-backend-module-regex-dynamic"), - ContainSubstring("roadiehq-scaffolder-backend-module-utils-dynamic"), - ), - }, } func VerifyBackstageRoute(g Gomega, ns string, crName string, tests []ApiEndpointTest) { @@ -133,6 +153,7 @@ func VerifyBackstageRoute(g Gomega, ns string, crName string, tests []ApiEndpoin fmt.Fprintln(GinkgoWriter, host) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(host).ShouldNot(BeEmpty()) + baseUrl := fmt.Sprintf("https://%s", host) tr := &http.Transport{ TLSClientConfig: &tls.Config{ @@ -142,17 +163,31 @@ func VerifyBackstageRoute(g Gomega, ns string, crName string, tests []ApiEndpoin httpClient := &http.Client{Transport: tr} performTest := func(tt ApiEndpointTest) { - url := fmt.Sprintf("https://%s/%s", host, strings.TrimPrefix(tt.Endpoint, "/")) + url := fmt.Sprintf("%s/%s", baseUrl, strings.TrimPrefix(tt.Endpoint, "/")) + + req, reqErr := http.NewRequest("GET", url, nil) + g.Expect(reqErr).ShouldNot(HaveOccurred(), 
fmt.Sprintf("error while building request to GET %q", url)) + + req.Header.Add("Accept", "application/json") + + if tt.BearerTokenRetrievalFn != nil { + bearerToken, tErr := tt.BearerTokenRetrievalFn(baseUrl) + g.Expect(tErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while retrieving bearer token, context: %q", tt.Endpoint)) + if bearerToken != "" { + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", bearerToken)) + } + } + fmt.Fprintf(GinkgoWriter, "--> GET %q\n", url) - resp, rErr := httpClient.Get(url) + resp, rErr := httpClient.Do(req) g.Expect(rErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while trying to GET %q", url)) defer resp.Body.Close() - - g.Expect(resp.StatusCode).Should(Equal(tt.ExpectedHttpStatusCode), "context: "+tt.Endpoint) body, rErr := io.ReadAll(resp.Body) g.Expect(rErr).ShouldNot(HaveOccurred(), fmt.Sprintf("error while trying to read response body from 'GET %q'", url)) + bodyStr := string(body) + g.Expect(resp.StatusCode).Should(Equal(tt.ExpectedHttpStatusCode), fmt.Sprintf("context: %s\n===Response body===\n%s", tt.Endpoint, bodyStr)) if tt.BodyMatcher != nil { - g.Expect(string(body)).Should(tt.BodyMatcher, "context: "+tt.Endpoint) + g.Expect(bodyStr).Should(tt.BodyMatcher, "context: "+tt.Endpoint) } } allTests := append(defaultApiEndpointTests, tests...)