diff --git a/.dockerignore b/.dockerignore
index 0f39cf793b..fe17421ddd 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,13 +1,19 @@
-*.pyc
+**/*.pyc
 *.env
 script.py
 node_modules
 .venv
 .ruff_cache
+**/.ruff_cache
 .mypy_cache
 .pytest_cache
 *.env
-
+.DS_Store
+.coverage
+coverage.xml
+.git*
+.devcontainer
+.husky
 frontend/node_modules
 
 # Direnv files (https://direnv.net/)
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000..54f5bf8ab3
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,17 @@
+# https://editorconfig.org
+# Top-most EditorConfig file
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+indent_size = 2
+indent_style = space
+trim_trailing_whitespace = true
+
+[*.py]
+indent_size = 4
+
+[*.md]
+trim_trailing_whitespace = false
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..fb9f4de76f
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,16 @@
+---
+# yamllint disable rule:truthy
+version: 2
+
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+      day: "monday"
+      time: "09:00"
+    target-branch: develop
+    labels:
+      - "type/housekeeping"
+      - "ci/skip-changelog"
diff --git a/.github/file-filters.yml b/.github/file-filters.yml
index ada6cb3396..01c2c6e59f 100644
--- a/.github/file-filters.yml
+++ b/.github/file-filters.yml
@@ -28,6 +28,11 @@ frontend_files: &frontend_files
 demo_files: &demo_files
   - "models/**"
 
+doc_files: &doc_files
+  - "docs/**"
+  - package.json
+  - package-lock.json
+
 python_all: &python_all
   - "**/*.py"
@@ -43,6 +48,10 @@ backend_all:
   - *ci_config
   - *development_files
 
+documentation_all:
+  - *development_files
+  - *doc_files
+
 sdk_all:
   - *sdk_files
   - *backend_files
diff --git a/.github/labels.yml b/.github/labels.yml
index c00d9c1c6f..293e41ead9 100644
--- a/.github/labels.yml
+++ b/.github/labels.yml
@@ -142,7 +142,7 @@
   description: "The redaction of the issue is still a work in progress"
   color: "dcb518"
 
-- name: "state/referenced"
+- name: "state/ref"
   description: "This issue is referenced in our internal tooling"
   color: "c9510c"
 
diff --git a/.github/release-note.yml b/.github/release-note.yml
index a8e0cbdcfd..9351f3896c 100644
--- a/.github/release-note.yml
+++ b/.github/release-note.yml
@@ -26,6 +26,6 @@ change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and ad
 #   - 'patch'
 # default: patch
 template: |
-  ## Changes
+  ## Changelog
 
   $CHANGES
diff --git a/.github/workflows/ci-docker-image.yml b/.github/workflows/ci-docker-image.yml
new file mode 100644
index 0000000000..fc3b971a37
--- /dev/null
+++ b/.github/workflows/ci-docker-image.yml
@@ -0,0 +1,79 @@
+---
+# yamllint disable rule:truthy
+name: Build and Push Docker image
+
+# When calling this workflow, ensure you use
+#   secrets: inherit
+
+on:
+  workflow_call:
+    inputs:
+      publish:
+        type: boolean
+        description: Whether to publish the image to Infrahub Private Registry
+        required: false
+        default: false
+      version:
+        type: string
+        required: false
+        description: The string to extract semver labels from.
+        default: ''
+      ref:
+        type: string
+        required: true
+        description: The Git ref from which the image will be built
+      tags:
+        type: string
+        required: true
+        description: The tags for the docker image
+      labels:
+        type: string
+        required: true
+        description: The labels for the docker image
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+env:
+  REGISTRY_IMAGE: "${{ secrets.HARBOR_HOST }}/${{ github.repository }}"
+  DOCKERFILE: "development/Dockerfile"
+  PLATFORMS: "linux/amd64,linux/arm64"
+
+jobs:
+  build:
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.ref }}
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to Harbor registry
+        if: ${{ inputs.publish }}
+        uses: docker/login-action@v3
+        id: login
+        with:
+          registry: ${{ secrets.HARBOR_HOST }}
+          username: ${{ secrets.HARBOR_USERNAME }}
+          password: ${{ secrets.HARBOR_PASSWORD }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v5
+        id: push
+        with:
+          context: .
+          file: ${{ env.DOCKERFILE }}
+          provenance: false  # To avoid cross platform "unknown"
+          push: ${{ inputs.publish }}
+          platforms: ${{ env.PLATFORMS }}
+          tags: ${{ inputs.tags }}
+          labels: ${{ inputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 02621d8b3b..fb64428ae7 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -26,14 +26,17 @@ env:
   BUILDKITE_ANALYTICS_BRANCH: ${{ github.ref }}
   BUILDKITE_BRANCH: ${{ github.ref }}
   BUILDKITE_COMMIT: ${{ github.sha }}
+  VALE_VERSION: "2.29.7"
 
 jobs:
+  # ------------------------------------------ Check Files Changes ------------------------------------------
   files-changed:
     name: Detect which file has changed
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     timeout-minutes: 5
     outputs:
       backend: ${{ steps.changes.outputs.backend_all }}
+      documentation: ${{ steps.changes.outputs.documentation_all }}
       frontend: ${{ steps.changes.outputs.frontend_all }}
       sdk: ${{ steps.changes.outputs.sdk_all }}
       sync: ${{ steps.changes.outputs.sync_all }}
@@ -43,7 +46,7 @@ jobs:
       yaml: ${{ steps.changes.outputs.yaml_all }}
     steps:
       - name: "Check out repository code"
-        uses: "actions/checkout@v3"
+        uses: "actions/checkout@v4"
       - name: Check for file changes
         uses: dorny/paths-filter@v2
         id: changes
         with:
           token: ${{ github.token }}
           filters: .github/file-filters.yml
 
+  # ------------------------------------------ All Linters ------------------------------------------
   yaml-lint:
     if: needs.files-changed.outputs.yaml == 'true'
     needs: ["files-changed"]
@@ -58,7 +62,7 @@
     timeout-minutes: 5
     steps:
       - name: "Check out repository code"
-        uses: "actions/checkout@v3"
+        uses: "actions/checkout@v4"
       - name: "Identify which files have changed"
         uses: dorny/paths-filter@v2
         id: changes
         with:
           filters: |
             yaml:
               - '**.yml'
               - '**.yaml'
               - 'development/workflows/ci.yml'
       - name: "Setup environment"
-        run: "pip install yamllint==1.29.0"
+        run: "pip install yamllint==1.33.0"
       - name: "Linting: yamllint"
         run: "yamllint ."
+ javascript-lint: if: needs.files-changed.outputs.javascript == 'true' needs: ["files-changed"] @@ -79,9 +84,9 @@ jobs: timeout-minutes: 5 steps: - name: "Check out repository code" - uses: "actions/checkout@v3" + uses: "actions/checkout@v4" - name: Install NodeJS - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: 16 cache: 'npm' @@ -92,6 +97,7 @@ jobs: - name: Run ESLint working-directory: ./frontend run: npm run eslint + python-lint: if: needs.files-changed.outputs.python == 'true' needs: ["files-changed"] @@ -99,105 +105,137 @@ jobs: timeout-minutes: 5 steps: - name: "Check out repository code" - uses: "actions/checkout@v3" + uses: "actions/checkout@v4" - name: "Setup environment" - run: "pip install black==23.1.0 ruff==0.0.265" - - name: "Linting: BLACK" - run: "black --check ." - - name: "Linting: ruff" - run: "ruff check ." - - # backend-build-docker: - # runs-on: ubuntu-latest - # steps: - # - name: Set up QEMU - # uses: docker/setup-qemu-action@v2 - # - name: Set up Docker Buildx - # uses: docker/setup-buildx-action@v2 - # - name: Build Backend Image - # uses: docker/build-push-action@v4 - # with: - # file: "development/Dockerfile-backend" - # push: false - # tags: "${{env.INFRAHUB_IMAGE_NAME}}:${{env.INFRAHUB_IMAGE_VER}}" - # cache-from: type=gha - # cache-to: type=gha,mode=max + run: "pip install ruff==0.1.8" + - name: "Linting: ruff check" + run: "ruff check --diff ." + - name: "Linting: ruff format" + run: "ruff format --check --diff ." - python-sdk-tests: + python-sdk-unit-tests: + strategy: + matrix: + python-version: + - "3.8" + - "3.9" + - "3.10" + - "3.11" + pydantic-version: ["^1.10", "^2"] if: | always() && !cancelled() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && needs.files-changed.outputs.sdk == 'true' needs: ["files-changed", "yaml-lint", "python-lint"] - runs-on: "ubuntu-20.04" + runs-on: ubuntu-latest timeout-minutes: 30 env: INFRAHUB_DB_TYPE: memgraph + defaults: + run: + working-directory: python_sdk/ steps: - name: "Check out repository code" - uses: "actions/checkout@v3" - - name: "Install Invoke" - run: "pip install toml invoke" - - name: "Build Test Image" - run: "invoke test.build" - - name: "Pull External Docker Images" - run: "invoke test.pull" - - name: "Black Tests" - run: "invoke sdk.black --docker" - - name: "Isort Tests" - run: "invoke sdk.isort --docker" + uses: "actions/checkout@v4" + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: "Setup environment" + run: | + pipx install poetry + poetry config virtualenvs.prefer-active-python true + pip install invoke toml + - name: Set Version of Pydantic + run: poetry add pydantic@${{ matrix.pydantic-version }} + - name: "Install Package" + run: "poetry install" - name: "Pylint Tests" - run: "invoke sdk.pylint --docker" + run: "poetry run pylint infrahub_sdk/ infrahub_ctl/" - name: "Mypy Tests" - run: "invoke sdk.mypy --docker" + run: "poetry run mypy --show-error-codes infrahub_sdk/ infrahub_ctl/" - name: "Unit Tests" - run: "invoke sdk.test-unit" + run: "poetry run pytest -v --cov=infrahub_sdk --cov=infrahub_ctl tests/unit" env: BUILDKITE_ANALYTICS_TOKEN: ${{ secrets.BUILDKITE_SDK_UNIT }} - name: "Coveralls : Unit Tests" - uses: coverallsapp/github-action@v2.0.0 + uses: coverallsapp/github-action@v2 env: COVERALLS_SERVICE_NUMBER: ${{ github.sha }} with: flag-name: python-sdk-unit parallel: true + + + python-sdk-integration-tests: + if: | + 
always() && !cancelled() && + !contains(needs.*.result, 'failure') && + !contains(needs.*.result, 'cancelled') + needs: ["python-sdk-unit-tests"] + runs-on: ubuntu-latest + timeout-minutes: 30 + env: + INFRAHUB_DB_TYPE: memgraph + steps: + - name: "Check out repository code" + uses: "actions/checkout@v4" + - name: "Install Invoke" + run: "pip install toml invoke" + - name: "Build Test Image" + run: "invoke test.build" + - name: "Pull External Docker Images" + run: "invoke test.pull" - name: "Integration Tests" run: "invoke sdk.test-integration" env: BUILDKITE_ANALYTICS_TOKEN: ${{ secrets.BUILDKITE_SDK_INTEGRATION }} - name: "Coveralls : Integration Tests" - uses: coverallsapp/github-action@v2.0.0 + uses: coverallsapp/github-action@v2 env: COVERALLS_SERVICE_NUMBER: ${{ github.sha }} with: flag-name: python-sdk-integration parallel: true - infrahub-sync-tests: + infrahub-sync-unit-tests: + strategy: + matrix: + python-version: + - "3.11" if: | always() && !cancelled() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && needs.files-changed.outputs.sync == 'true' needs: ["files-changed", "yaml-lint", "python-lint"] - runs-on: "ubuntu-20.04" + runs-on: ubuntu-latest timeout-minutes: 30 + defaults: + run: + working-directory: sync/ steps: - name: "Check out repository code" - uses: "actions/checkout@v3" - - name: "Install Invoke" - run: "pip install toml invoke" - - name: "Build Test Image" - run: "invoke test.build" - - name: "Black Tests" - run: "invoke sync.black --docker" - - name: "Isort Tests" - run: "invoke sync.isort --docker" + uses: "actions/checkout@v4" + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: "Setup environment" + run: | + pipx install poetry + poetry config virtualenvs.prefer-active-python true + pip install invoke toml + - name: "Install Package" + run: "poetry install" - name: "Pylint Tests" - run: "invoke sync.pylint --docker" + run: "poetry run pylint infrahub_sync/" + # - name: "Mypy Tests" + # run: "poetry run mypy --show-error-codes infrahub_sync/" + - backend-tests-default: + backend-tests-unit: if: | always() && !cancelled() && !contains(needs.*.result, 'failure') && @@ -210,38 +248,55 @@ jobs: INFRAHUB_DB_TYPE: memgraph steps: - name: "Check out repository code" - uses: "actions/checkout@v3" + uses: "actions/checkout@v4" - name: "Setup Python environment" run: "pip install toml invoke" - name: "Build Test Image" run: "invoke test.build" - name: "Pull External Docker Images" run: "invoke test.pull" - - name: "Black Tests" - run: "invoke backend.black --docker" - - name: "Isort Tests" - run: "invoke backend.isort --docker" - - name: "Pylint Tests" - run: "invoke backend.pylint --docker" - - name: "Mypy Tests" - run: "invoke backend.mypy --docker" - name: "Unit Tests" run: "invoke backend.test-unit" env: BUILDKITE_ANALYTICS_TOKEN: ${{ secrets.BUILDKITE_BACKEND_UNIT_DEFAULT }} - name: "Coveralls : Unit Tests" - uses: coverallsapp/github-action@v2.0.0 + uses: coverallsapp/github-action@v2 env: COVERALLS_SERVICE_NUMBER: ${{ github.sha }} with: flag-name: backend-unit parallel: true + + backend-tests-integration: + if: | + always() && !cancelled() && + !contains(needs.*.result, 'failure') && + !contains(needs.*.result, 'cancelled') && + needs.files-changed.outputs.backend == 'true' + needs: ["files-changed", "yaml-lint", "python-lint"] + runs-on: "runner-ubuntu-4-16" + timeout-minutes: 30 + env: + INFRAHUB_DB_TYPE: memgraph + steps: + - 
name: "Check out repository code" + uses: "actions/checkout@v4" + - name: "Setup Python environment" + run: "pip install toml invoke" + - name: "Build Test Image" + run: "invoke test.build" + - name: "Pull External Docker Images" + run: "invoke test.pull" + - name: "Pylint Tests" + run: "invoke backend.pylint --docker" + - name: "Mypy Tests" + run: "invoke backend.mypy --docker" - name: "Integration Tests" run: "invoke backend.test-integration" env: BUILDKITE_ANALYTICS_TOKEN: ${{ secrets.BUILDKITE_BACKEND_INTEGRATION }} - name: "Coveralls : Integration Tests" - uses: coverallsapp/github-action@v2.0.0 + uses: coverallsapp/github-action@v2 env: COVERALLS_SERVICE_NUMBER: ${{ github.sha }} with: @@ -256,12 +311,12 @@ jobs: needs.files-changed.outputs.backend == 'true' needs: ["files-changed", "yaml-lint", "python-lint"] runs-on: "runner-ubuntu-4-16" - timeout-minutes: 30 + timeout-minutes: 45 env: INFRAHUB_DB_TYPE: neo4j steps: - name: "Check out repository code" - uses: "actions/checkout@v3" + uses: "actions/checkout@v4" - name: "Setup Python environment" run: "pip install toml invoke" - name: "Build Test Image" @@ -280,13 +335,13 @@ jobs: !contains(needs.*.result, 'cancelled') && needs.files-changed.outputs.frontend == 'true' needs: ["files-changed", "yaml-lint", "javascript-lint"] - runs-on: "ubuntu-20.04" + runs-on: "ubuntu-22.04" timeout-minutes: 30 steps: - name: "Check out repository code" - uses: "actions/checkout@v3" + uses: "actions/checkout@v4" - name: Install NodeJS - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: 16 cache: 'npm' @@ -313,7 +368,7 @@ jobs: name: screenshots path: frontend/cypress/videos/* - name: "Coveralls : Unit Tests" - uses: coverallsapp/github-action@v2.0.0 + uses: coverallsapp/github-action@v2 env: COVERALLS_SERVICE_NUMBER: ${{ github.sha }} with: @@ -321,84 +376,99 @@ jobs: parallel: true file: frontend/coverage/lcov.info - # E2E-testing-memgraph: - # needs: ["frontend-tests", "backend-tests-default", "python-sdk-tests"] - # if: | - # always() && !cancelled() && - # !contains(needs.*.result, 'failure') && - # !contains(needs.*.result, 'cancelled') - # runs-on: "runner-ubuntu-8-32" - # timeout-minutes: 30 - # steps: - # - name: "Check out repository code" - # uses: "actions/checkout@v3" - # - name: Install NodeJS - # uses: actions/setup-node@v3 - # with: - # node-version: 16 - # cache: 'npm' - # cache-dependency-path: frontend/package-lock.json - # - name: Install frontend dependencies - # working-directory: ./frontend - # run: npm install - # - name: "Install Invoke" - # run: "pip install toml invoke" - # - name: Build Demo - # run: "invoke demo.build" - # - name: "Pull External Docker Images" - # run: "invoke demo.pull" - # - name: Initialize Demo - # id: init-demo - # run: "invoke demo.start demo.load-infra-schema" - # - name: Check Demo Status - # run: "invoke demo.status" - # - name: Load Data - # run: "invoke demo.load-infra-data" - # - name: Git Repository - # run: "invoke demo.infra-git-import demo.infra-git-create" - # - name: Run End to End Tests - # working-directory: ./frontend - # run: npm run cypress:run:e2e - # - name: Containers after failure - # if: failure() - # run: docker ps -a - # - name: Upload cypress screenshots - # if: failure() - # uses: actions/upload-artifact@v3 - # with: - # name: screenshots - # path: docs/media/* - # - name: Display server logs - # if: failure() - # run: docker logs infrahub-infrahub-server-1 - # - name: Display git 1 logs - # if: failure() - # run: docker logs 
infrahub-infrahub-git-1 - # - name: Display git 2 logs - # if: failure() - # run: docker logs infrahub-infrahub-git-2 - # - name: Display database logs - # if: failure() - # run: docker logs infrahub-database-1 - # - name: Display server status - # if: failure() - # run: invoke demo.status + documentation: + if: | + always() && !cancelled() && + !contains(needs.*.result, 'failure') && + !contains(needs.*.result, 'cancelled') && + needs.files-changed.outputs.documentation == 'true' + needs: ["files-changed", "yaml-lint", "python-lint"] + runs-on: "ubuntu-22.04" + timeout-minutes: 5 + steps: + - name: "Check out repository code" + uses: "actions/checkout@v4" + - name: Install NodeJS + uses: actions/setup-node@v4 + with: + node-version: 18 + cache: 'npm' + cache-dependency-path: package-lock.json + - name: "Install dependencies" + run: npm install + - name: "Setup Python environment" + run: "pip install toml invoke" + - name: "Build website" + run: "invoke docs.build" - E2E-testing-neo4j: - needs: ["frontend-tests", "backend-tests-default", "python-sdk-tests"] + validate_generated_documentation: + if: | + always() && !cancelled() && + !contains(needs.*.result, 'failure') && + !contains(needs.*.result, 'cancelled') && + needs.files-changed.outputs.python == 'true' + needs: ["files-changed", "yaml-lint", "python-lint"] + runs-on: "ubuntu-22.04" + timeout-minutes: 5 + steps: + - name: "Check out repository code" + uses: "actions/checkout@v4" + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.11" + - name: "Setup environment" + run: "pip install invoke toml" + - name: "Build Test Image" + run: "invoke test.build" + - name: "Validate generated documentation" + run: "invoke docs.validate --docker" + + validate_documentation_style: + if: | + always() && !cancelled() && + !contains(needs.*.result, 'failure') && + !contains(needs.*.result, 'cancelled') && + needs.files-changed.outputs.documentation == 'true' + needs: ["files-changed", "yaml-lint", "python-lint"] + runs-on: "ubuntu-22.04" + timeout-minutes: 5 + steps: + - name: "Check out repository code" + uses: "actions/checkout@v4" + # The official GitHub Action for Vale doesn't work, installing manually instead: + # https://github.com/errata-ai/vale-action/issues/103 + - name: Download Vale + run: | + curl -sL "https://github.com/errata-ai/vale/releases/download/v${VALE_VERSION}/vale_${VALE_VERSION}_Linux_64-bit.tar.gz" -o vale.tar.gz + tar -xzf vale.tar.gz + env: + VALE_VERSION: ${{ env.VALE_VERSION }} + - name: "Validate documentation style" + run: "./vale ." 
+  # ------------------------------------------ E2E Tests ------------------------------------------
+  E2E-testing:
+    needs:
+      - javascript-lint
+      - files-changed
+      - yaml-lint
+      - python-lint
     if: |
       always() && !cancelled() &&
       !contains(needs.*.result, 'failure') &&
       !contains(needs.*.result, 'cancelled')
+    strategy:
+      matrix:
+        database-type: ["neo4j", "memgraph"]
     runs-on: "runner-ubuntu-8-32"
     timeout-minutes: 40
     env:
-      INFRAHUB_DB_TYPE: neo4j
+      INFRAHUB_DB_TYPE: ${{ matrix.database-type }}
     steps:
       - name: "Check out repository code"
-        uses: "actions/checkout@v3"
+        uses: "actions/checkout@v4"
       - name: Install NodeJS
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 16
           cache: 'npm'
@@ -424,8 +494,7 @@
       - name: Run End to End Tests
         working-directory: ./frontend
         run: npm run cypress:run:e2e
-      - name: Containers after failure
-        if: failure()
+      - name: Containers after tests
         run: docker ps -a
       - name: Upload cypress screenshots
         if: failure()
@@ -434,23 +503,23 @@
           name: screenshots
           path: docs/media/*
       - name: Display server logs
-        if: failure()
         run: docker logs infrahub-infrahub-server-1
       - name: Display git 1 logs
-        if: failure()
         run: docker logs infrahub-infrahub-git-1
       - name: Display git 2 logs
-        if: failure()
         run: docker logs infrahub-infrahub-git-2
       - name: Display database logs
-        if: failure()
         run: docker logs infrahub-database-1
       - name: Display server status
-        if: failure()
         run: invoke demo.status
 
+  # ------------------------------------------ Coveralls Report ------------------------------------------
   coverall-report:
-    needs: ["frontend-tests", "backend-tests-default", "python-sdk-tests"]
+    needs:
+      - backend-tests-integration
+      - backend-tests-unit
+      - frontend-tests
+      - python-sdk-integration-tests
     if: |
       always() && !cancelled()
     runs-on: ubuntu-latest
diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml
index c3754852c9..8e8d76fb01 100644
--- a/.github/workflows/labels.yml
+++ b/.github/workflows/labels.yml
@@ -17,10 +17,10 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Run Labeler
-        uses: crazy-max/ghaction-github-labeler@v4
+        uses: crazy-max/ghaction-github-labeler@v5
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           yaml-file: .github/labels.yml
diff --git a/.github/workflows/publish-dev-docker-image.yml b/.github/workflows/publish-dev-docker-image.yml
new file mode 100644
index 0000000000..9f6a5c62f3
--- /dev/null
+++ b/.github/workflows/publish-dev-docker-image.yml
@@ -0,0 +1,60 @@
+---
+# yamllint disable rule:truthy
+name: Publish development docker image
+
+on:
+  workflow_dispatch:
+    inputs:
+      publish:
+        type: boolean
+        description: Publish the image
+        default: true
+        required: true
+      commit:
+        type: string
+        description: commit sha or branch name
+        default: ''
+        required: false
+
+
+jobs:
+  meta_data:
+    runs-on: ubuntu-22.04
+    outputs:
+      tags: ${{ steps.meta.outputs.tags }}
+      labels: ${{ steps.meta.outputs.labels }}
+      ref: ${{ steps.ref.outputs.ref }}
+      short_ref: ${{ steps.short_ref.outputs.short_ref }}
+    steps:
+      - name: Set GIT ref
+        run: echo "ref=${{ inputs.commit == '' && github.sha || inputs.commit }}" >> $GITHUB_OUTPUT
+        id: ref
+      - name: Set GIT short ref
+        run: echo "short_ref=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
+        id: short_ref
+      - name: Set docker image meta data
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            ${{ env.REGISTRY_IMAGE }}
+          tags: |
+            type=raw,value=dev-${{ steps.short_ref.outputs.short_ref }}
+          labels: |
+            org.opencontainers.image.source=${{ github.repository }}
+            org.opencontainers.image.version=dev-${{ steps.short_ref.outputs.short_ref }}
+            org.opencontainers.image.created=${{ steps.meta.outputs.created }}
+          flavor: |
+            latest=false
+
+
+  publish-docker-image:
+    needs: meta_data
+    uses: ./.github/workflows/ci-docker-image.yml
+    secrets: inherit
+    with:
+      publish: ${{ inputs.publish }}
+      version: dev-${{ needs.meta_data.outputs.short_ref }}
+      ref: ${{ needs.meta_data.outputs.ref }}
+      tags: ${{needs.meta_data.outputs.tags}}
+      labels: ${{needs.meta_data.outputs.labels}}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000..26f9af635c
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,43 @@
+---
+# yamllint disable rule:truthy
+name: New Release
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  meta_data:
+    runs-on: ubuntu-22.04
+    outputs:
+      tags: ${{ steps.meta.outputs.tags }}
+      labels: ${{ steps.meta.outputs.labels }}
+    steps:
+      - name: Set docker image meta data
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            ${{ env.REGISTRY_IMAGE }}
+          tags: |
+            type=semver,pattern={{version}},value=${{ github.ref_name }}
+            type=semver,pattern={{major}}.{{minor}},value=${{ github.ref_name }}
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
+          labels: |
+            org.opencontainers.image.source=${{ github.repository }}
+            org.opencontainers.image.version=${{ github.ref_name }}
+            org.opencontainers.image.created=${{ steps.meta.outputs.created }}
+          flavor: |
+            latest=false
+
+  publish-docker-image:
+    needs: meta_data
+    uses: ./.github/workflows/ci-docker-image.yml
+    secrets: inherit
+    with:
+      publish: true
+      version: ${{ github.ref_name }}
+      ref: ${{ github.sha }}
+      tags: ${{ needs.meta_data.outputs.tags }}
+      labels: ${{ needs.meta_data.outputs.labels }}
diff --git a/.github/workflows/schedule-publish-dev-docker-image.yml b/.github/workflows/schedule-publish-dev-docker-image.yml
new file mode 100644
index 0000000000..a64b0b8fe7
--- /dev/null
+++ b/.github/workflows/schedule-publish-dev-docker-image.yml
@@ -0,0 +1,57 @@
+---
+# yamllint disable rule:truthy
+name: Scheduled publish development docker image
+
+on:
+  schedule:
+    - cron: "0 0 * * *"
+
+jobs:
+  meta_data:
+    runs-on: ubuntu-22.04
+    outputs:
+      tags: ${{ steps.meta.outputs.tags }}
+      labels: ${{ steps.meta.outputs.labels }}
+      ref: ${{ steps.ref.outputs.ref }}
+      short_ref: ${{ steps.short_ref.outputs.short_ref }}
+      date: ${{ steps.date.outputs.date }}
+    steps:
+      - name: Checkout development branch
+        uses: actions/checkout@v4
+        with:
+          ref: develop
+      - name: Set GIT ref
+        run: echo "ref=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
+        id: ref
+      - name: Set GIT short ref
+        run: echo "short_ref=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+        id: short_ref
+      - name: Set date
+        run: echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_OUTPUT
+        id: date
+      - name: Set docker image meta data
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            ${{ env.REGISTRY_IMAGE }}
+          tags: |
+            type=raw,value=dev-${{ steps.date.outputs.date }}-${{ steps.short_ref.outputs.short_ref }}
+          labels: |
+            org.opencontainers.image.source=${{ github.repository }}
+            org.opencontainers.image.version=dev-${{ steps.short_ref.outputs.short_ref }}
+            org.opencontainers.image.created=${{ steps.meta.outputs.created }}
+          flavor: |
+            latest=false
+
+
+  publish-docker-image:
+    needs: meta_data
+    uses: ./.github/workflows/ci-docker-image.yml
+    secrets: inherit
+    with:
+      publish: true
+      version: dev-${{ needs.meta_data.outputs.date }}-${{ needs.meta_data.outputs.short_ref }}
+      ref: ${{ needs.meta_data.outputs.ref }}
+      tags: ${{needs.meta_data.outputs.tags}}
+      labels: ${{needs.meta_data.outputs.labels}}
diff --git a/.gitignore b/.gitignore
index 9f77e863ab..ba69ea471c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,12 +3,16 @@
 coverage.xml
 *.pyc
 *.env
 script.py
+**/*.local.*
 .vscode/settings.json
 node_modules/*
 development/docker-compose.override.yml
 development/docker-compose.dev-override.yml
 .DS_Store
 .python-version
+.ruff_cache
+**/.ruff_cache
+**/.idea/**
 
 # Direnv files (https://direnv.net/)
 .direnv/
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
new file mode 100644
index 0000000000..02ca37b4ff
--- /dev/null
+++ b/.markdownlint.yaml
@@ -0,0 +1,11 @@
+---
+default: true
+MD013: false # disables max line-length
+MD024: false # disables 'no duplicate headings', which we use in tabs for instructions
+MD025:
+  front_matter_title: "" # prevent collisions with h1s and frontmatter titles
+MD029: false # allows manually creating ordered lists
+MD033: false # allows inline html to override markdown styles
+MD034: false # no-bare-urls
+MD045: false # no alt text around images
+MD047: false # single trailing newline
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ff6ae90dce..13f5901a65 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,13 @@ repos:
       - id: check-toml
       - id: check-yaml
       - id: end-of-file-fixer
-  - repo: https://github.com/pycqa/isort
-    rev: 5.10.1
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.1.6
     hooks:
-      - id: isort
+      # Run the linter.
+      - id: ruff
+        args: [--fix]
+      # Run the formatter.
+      - id: ruff-format
diff --git a/.vale.ini b/.vale.ini
new file mode 100644
index 0000000000..e0d258dd7a
--- /dev/null
+++ b/.vale.ini
@@ -0,0 +1,6 @@
+StylesPath = .vale/styles
+
+MinAlertLevel = warning
+
+[docs/**/*.md]
+BasedOnStyles = Infrahub
\ No newline at end of file
diff --git a/.vale/styles/Infrahub/branded-terms-case-swap.yml b/.vale/styles/Infrahub/branded-terms-case-swap.yml
new file mode 100644
index 0000000000..214ff2f10f
--- /dev/null
+++ b/.vale/styles/Infrahub/branded-terms-case-swap.yml
@@ -0,0 +1,14 @@
+---
+extends: substitution
+message: Use '%s' instead of '%s'
+level: error
+ignorecase: false
+action:
+  name: replace
+swap:
+  (?i:Github): GitHub
+  (?i:gitpod): GitPod
+  (?i:Graphql): GraphQL
+  infrahub(?:\s|$): Infrahub
+  (?i:Openconfig): OpenConfig
+  "[Rr]file": RFile
diff --git a/.vale/styles/Infrahub/colon-space.yml b/.vale/styles/Infrahub/colon-space.yml
new file mode 100644
index 0000000000..ced1d5b39b
--- /dev/null
+++ b/.vale/styles/Infrahub/colon-space.yml
@@ -0,0 +1,8 @@
+---
+extends: substitution
+message: "Use a space after a colon, but not before"
+level: warning
+ignorecase: true
+nonword: true
+swap:
+  '(?<=\s):(?=\s+)': ': '
diff --git a/.vale/styles/Infrahub/eg-ie.yml b/.vale/styles/Infrahub/eg-ie.yml
new file mode 100644
index 0000000000..f0933d8fda
--- /dev/null
+++ b/.vale/styles/Infrahub/eg-ie.yml
@@ -0,0 +1,12 @@
+---
+extends: substitution
+message: "Instead of '%s', use 'for example' or 'i.e.'."
+level: warning
+ignorecase: true
+nonword: true
+action:
+  name: replace
+swap:
+  - e\.g\.: for example
+  - e\. g\.: for example
+  - i\. e\.: i.e.
diff --git a/.vale/styles/Infrahub/oxford-comma.yml b/.vale/styles/Infrahub/oxford-comma.yml new file mode 100644 index 0000000000..846c9006af --- /dev/null +++ b/.vale/styles/Infrahub/oxford-comma.yml @@ -0,0 +1,8 @@ +--- +extends: existence +message: "Use a comma before the last 'and' or 'or' in a list of items." +level: suggestion +scope: sentence +nonword: true +tokens: + - '(?:[^\s,]+,){1,} \w+ (?:and|or) \w+[.?!]' diff --git a/.vale/styles/Infrahub/sentence-case.yml b/.vale/styles/Infrahub/sentence-case.yml new file mode 100644 index 0000000000..1314f252f4 --- /dev/null +++ b/.vale/styles/Infrahub/sentence-case.yml @@ -0,0 +1,40 @@ +--- +extends: capitalization +message: "'%s' should use sentence case" +level: warning +scope: + - heading.h1 + - heading.h2 + - heading.h3 + - heading.h4 + - heading.h5 + - heading.h6 +match: $sentence +exceptions: + - API + - Attributes + - CI + - Docker Compose + - E2E + - Generics + - Git + - GitHub + - GitLab + - GitPod + - GraphQL + - GraphQLQuery + - Helm + - Infrahub + - infrahubctl + - JavaScript + - Jinja + - Jinja2 + - Namespace + - Node + - Python + - REST + - RFile + - SDK + - TransformPython + - Vale + - VS Code diff --git a/.vale/styles/Infrahub/simple-easy.yml b/.vale/styles/Infrahub/simple-easy.yml new file mode 100644 index 0000000000..5bdbce53c1 --- /dev/null +++ b/.vale/styles/Infrahub/simple-easy.yml @@ -0,0 +1,9 @@ +--- +extends: existence +message: "Remove %s and avoid words that minimize the effort level required." +level: warning +ignorecase: true +tokens: + - easy + - simple + - simply diff --git a/.vale/styles/Infrahub/spelling.yml b/.vale/styles/Infrahub/spelling.yml new file mode 100644 index 0000000000..1162d3be9f --- /dev/null +++ b/.vale/styles/Infrahub/spelling.yml @@ -0,0 +1,7 @@ +--- +extends: spelling +message: "Did you really mean '%s'?" 
+level: error +filters: + - '[pP]y.*\b' +ignore: spelling-exceptions.txt diff --git a/.vale/styles/Infrahub/swap.yml b/.vale/styles/Infrahub/swap.yml new file mode 100644 index 0000000000..25e33e9b91 --- /dev/null +++ b/.vale/styles/Infrahub/swap.yml @@ -0,0 +1,10 @@ +--- +extends: substitution +message: "Use '%s' instead of '%s'" +level: error +ignorecase: true +action: + name: replace +swap: + config\b(?!\.\w): configuration + repo: repository diff --git a/.vale/styles/spelling-exceptions.txt b/.vale/styles/spelling-exceptions.txt new file mode 100644 index 0000000000..63b8b36565 --- /dev/null +++ b/.vale/styles/spelling-exceptions.txt @@ -0,0 +1,49 @@ +agent +APIs +async +autoflake +config +cypher +datastore +dev +docker +dockerfile +e2e +enum +enums +env +eslint +excalidraw +fanout +github +graphene +graphiql +graphql +greymatter +Infrahub +Infrahub's +infrahubctl +IPAddress +IPHost +IPNetwork +isort +jinja +kbps +markdownlint +memgraph +namespace +namespaces +o'brian +openconfig +opentelemetry +PyPI +rebase +repo +REST +rfile +rfiles +sdk +toml +uncheck +validators +yamllint diff --git a/.yamllint.yml b/.yamllint.yml index beb797f226..ffde13e569 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -7,6 +7,8 @@ ignore: | /repositories /frontend/node_modules /node_modules + # https://github.com/sbaudoin/yamllint/issues/16 + /helm/templates rules: new-lines: disable diff --git a/backend/infrahub/api/background.py b/backend/infrahub/api/background.py deleted file mode 100644 index 70f3a48691..0000000000 --- a/backend/infrahub/api/background.py +++ /dev/null @@ -1,25 +0,0 @@ -import asyncio -import random - -from fastapi.logger import logger - -from infrahub.database import InfrahubDatabase -from infrahub.tasks.registry import refresh_branches - - -class BackgroundRunner: - def __init__(self, db: InfrahubDatabase, database_name: str, interval: int = 10): - self.db = db - self.database_name = database_name - self.interval = interval - - async def run(self): - logger.info("Background process started") - - while True: - # Add some randomness to the interval to avoid having all workers pulling the latest update at the same time - random_number = random.randint(1, 4) - await asyncio.sleep(self.interval + random_number - 2) - - async with self.db.start_session() as db: - await refresh_branches(db=db) diff --git a/backend/infrahub/api/diff/__init__.py b/backend/infrahub/api/diff/__init__.py new file mode 100644 index 0000000000..25b37e6879 --- /dev/null +++ b/backend/infrahub/api/diff/__init__.py @@ -0,0 +1,3 @@ +from .diff import router + +__all__ = ["router"] diff --git a/backend/infrahub/api/diff.py b/backend/infrahub/api/diff/diff.py similarity index 98% rename from backend/infrahub/api/diff.py rename to backend/infrahub/api/diff/diff.py index 612a584685..2bef4825a2 100644 --- a/backend/infrahub/api/diff.py +++ b/backend/infrahub/api/diff/diff.py @@ -13,10 +13,12 @@ from infrahub import config from infrahub.api.dependencies import get_branch_dep, get_current_user, get_db from infrahub.core import get_branch, registry -from infrahub.core.branch import Branch # noqa: TCH001 -from infrahub.core.branch import Diff # noqa: TCH001 -from infrahub.core.branch import NodeDiffElement # noqa: TCH001 -from infrahub.core.branch import RelationshipDiffElement # noqa: TCH001 +from infrahub.core.branch import ( + Branch, # noqa: TCH001 + Diff, # noqa: TCH001 + NodeDiffElement, # noqa: TCH001 + RelationshipDiffElement, # noqa: TCH001 +) from infrahub.core.constants import ( BranchSupportType, DiffAction, @@ 
-26,6 +28,8 @@ from infrahub.core.schema_manager import INTERNAL_SCHEMA_NODE_KINDS from infrahub.database import InfrahubDatabase # noqa: TCH001 +from .validation_models import DiffQueryValidated + if TYPE_CHECKING: from infrahub.message_bus.rpc import InfrahubRpcClient @@ -929,8 +933,13 @@ async def get_diff_data( branch_only: bool = True, _: str = Depends(get_current_user), ) -> BranchDiff: + query = DiffQueryValidated(branch=branch, time_from=time_from, time_to=time_to, branch_only=branch_only) diff = await branch.diff( - db=db, diff_from=time_from, diff_to=time_to, branch_only=branch_only, namespaces_exclude=["Schema"] + db=db, + diff_from=query.time_from, + diff_to=query.time_to, + branch_only=query.branch_only, + namespaces_exclude=["Schema"], ) schema = registry.schema.get_full(branch=branch) diff_payload = DiffPayload(db=db, diff=diff, kinds_to_include=list(schema.keys())) @@ -946,8 +955,13 @@ async def get_diff_schema( branch_only: bool = True, _: str = Depends(get_current_user), ) -> BranchDiff: + query = DiffQueryValidated(branch=branch, time_from=time_from, time_to=time_to, branch_only=branch_only) diff = await branch.diff( - db=db, diff_from=time_from, diff_to=time_to, branch_only=branch_only, kinds_include=INTERNAL_SCHEMA_NODE_KINDS + db=db, + diff_from=query.time_from, + diff_to=query.time_to, + branch_only=query.branch_only, + kinds_include=INTERNAL_SCHEMA_NODE_KINDS, ) diff_payload = DiffPayload(db=db, diff=diff) return await diff_payload.generate_diff_payload() diff --git a/backend/infrahub/api/diff/validation_models.py b/backend/infrahub/api/diff/validation_models.py new file mode 100644 index 0000000000..d75fc0457e --- /dev/null +++ b/backend/infrahub/api/diff/validation_models.py @@ -0,0 +1,37 @@ +from typing import Any, Dict, Optional + +from pydantic import BaseModel, root_validator, validator + +from infrahub.core.branch import Branch +from infrahub.core.timestamp import Timestamp + + +class DiffQueryValidated(BaseModel): + branch: Branch + time_from: Optional[str] + time_to: Optional[str] + branch_only: bool + + class Config: + arbitrary_types_allowed = True + + @validator("time_from", "time_to", pre=True) + @classmethod + def validate_time(cls, value: Optional[str]) -> Optional[str]: + if not value: + return None + Timestamp(value) + return value + + @root_validator(skip_on_failure=True) + @classmethod + def validate_time_from_if_required(cls, values: Dict[str, Any]) -> Dict[str, Any]: + branch: Optional[Branch] = values.get("branch") + time_from: Optional[Timestamp] = values.get("time_from") + if getattr(branch, "is_default", False) and not time_from: + branch_name = getattr(branch, "name", "") + raise ValueError(f"time_from is mandatory when diffing on the default branch `{branch_name}`.") + time_to: Optional[Timestamp] = values.get("time_to") + if time_to and time_from and time_to < time_from: + raise ValueError("time_from and time_to are not a valid time range") + return values diff --git a/backend/infrahub/api/exception_handlers.py b/backend/infrahub/api/exception_handlers.py new file mode 100644 index 0000000000..3ae3579eea --- /dev/null +++ b/backend/infrahub/api/exception_handlers.py @@ -0,0 +1,22 @@ +from fastapi.responses import JSONResponse +from pydantic import ValidationError + +from infrahub.exceptions import Error + + +async def generic_api_exception_handler(_, exc: Exception, http_code: int = 500) -> JSONResponse: + """Generic API Exception handler.""" + if isinstance(exc, Error): + if exc.HTTP_CODE: + http_code = exc.HTTP_CODE + messages = 
[str(exc.message) if exc.message else exc.DESCRIPTION] + elif isinstance(exc, ValidationError): + messages = [ed["msg"] for ed in exc.errors()] + else: + messages = [str(exc)] + error_dict = { + "data": None, + "errors": [{"message": message, "extensions": {"code": http_code}} for message in messages], + } + + return JSONResponse(status_code=http_code, content=error_dict) diff --git a/backend/infrahub/api/exceptions.py b/backend/infrahub/api/exceptions.py new file mode 100644 index 0000000000..3a77b2161e --- /dev/null +++ b/backend/infrahub/api/exceptions.py @@ -0,0 +1,8 @@ +from infrahub.exceptions import Error + + +class QueryValidationError(Error): + HTTP_CODE = 400 + + def __init__(self, message: str): + self.message = message diff --git a/backend/infrahub/api/file.py b/backend/infrahub/api/file.py index 037e0044a0..700e8327db 100644 --- a/backend/infrahub/api/file.py +++ b/backend/infrahub/api/file.py @@ -18,7 +18,7 @@ from infrahub.message_bus.responses import ContentResponse if TYPE_CHECKING: - from infrahub.message_bus.rpc import InfrahubRpcClient + from infrahub.services import InfrahubServices router = APIRouter(prefix="/file") @@ -35,7 +35,7 @@ async def get_file( _: str = Depends(get_current_user), ) -> PlainTextResponse: """Retrieve a file from a git repository.""" - rpc_client: InfrahubRpcClient = request.app.state.rpc_client + service: InfrahubServices = request.app.state.service repo = await NodeManager.get_one_by_id_or_default_filter( db=db, @@ -57,6 +57,6 @@ async def get_file( file=file_path, ) - response = await rpc_client.rpc(message=message) + response = await service.message_bus.rpc(message=message) content = response.parse(response_class=ContentResponse) return PlainTextResponse(content=content.content) diff --git a/backend/infrahub/api/menu.py b/backend/infrahub/api/menu.py index 867c050c29..5e1c3da092 100644 --- a/backend/infrahub/api/menu.py +++ b/backend/infrahub/api/menu.py @@ -67,7 +67,7 @@ async def get_menu( continue if isinstance(model, NodeSchema) and "CoreGroup" in model.inherit_from: - groups.children.append(InterfaceMenu(title=model.label or model.name, path=f"/groups/{model.kind}")) + groups.children.append(InterfaceMenu(title=model.menu_title, path=f"/groups/{model.kind}")) continue menu_name = model.menu_placement or "base" @@ -75,9 +75,7 @@ async def get_menu( structure[menu_name] = [] structure[menu_name].append( - InterfaceMenu( - title=model.label or model.name, path=f"/objects/{model.kind}", icon=model.icon or "", kind=model.kind - ) + InterfaceMenu(title=model.menu_title, path=f"/objects/{model.kind}", icon=model.icon or "", kind=model.kind) ) for menu_item in structure["base"]: diff --git a/backend/infrahub/api/schema.py b/backend/infrahub/api/schema.py index 3dc14e1e1f..0e2ba563fe 100644 --- a/backend/infrahub/api/schema.py +++ b/backend/infrahub/api/schema.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import List, Optional +from typing import TYPE_CHECKING, List, Optional, Union from fastapi import APIRouter, BackgroundTasks, Depends from pydantic import BaseModel, Field, root_validator @@ -10,7 +10,9 @@ from infrahub.api.dependencies import get_branch_dep, get_current_user, get_db from infrahub.core import registry from infrahub.core.branch import Branch # noqa: TCH001 -from infrahub.core.schema import GenericSchema, NodeSchema, SchemaRoot +from infrahub.core.models import SchemaBranchHash # noqa: TCH001 +from infrahub.core.schema import GenericSchema, GroupSchema, NodeSchema, SchemaRoot +from 
infrahub.core.schema_manager import SchemaNamespace # noqa: TCH001 from infrahub.database import InfrahubDatabase # noqa: TCH001 from infrahub.exceptions import PermissionDeniedError, SchemaNotFound from infrahub.log import get_logger @@ -18,12 +20,25 @@ from infrahub.services import services from infrahub.worker import WORKER_IDENTITY +if TYPE_CHECKING: + from typing_extensions import Self + + log = get_logger() router = APIRouter(prefix="/schema") -class APINodeSchema(NodeSchema): +class APISchemaMixin: + @classmethod + def from_schema(cls, schema: NodeSchema) -> Self: + data = schema.dict() + data["hash"] = schema.get_hash() + return cls(**data) + + +class APINodeSchema(NodeSchema, APISchemaMixin): api_kind: Optional[str] = Field(default=None, alias="kind") + hash: str @root_validator(pre=True) @classmethod @@ -35,8 +50,9 @@ def set_kind( return values -class APIGenericSchema(GenericSchema): +class APIGenericSchema(GenericSchema, APISchemaMixin): api_kind: Optional[str] = Field(default=None, alias="kind") + hash: str @root_validator(pre=True) @classmethod @@ -49,8 +65,10 @@ def set_kind( class SchemaReadAPI(BaseModel): - nodes: List[APINodeSchema] - generics: List[APIGenericSchema] + main: str = Field(description="Main hash for the entire schema") + nodes: List[APINodeSchema] = Field(default_factory=list) + generics: List[APIGenericSchema] = Field(default_factory=list) + namespaces: List[SchemaNamespace] = Field(default_factory=list) class SchemaLoadAPI(SchemaRoot): @@ -66,16 +84,46 @@ class SchemasLoadAPI(SchemaRoot): async def get_schema( branch: Branch = Depends(get_branch_dep), ) -> SchemaReadAPI: - log.info("schema_request", branch=branch.name) - - full_schema = registry.schema.get_full(branch=branch) + log.debug("schema_request", branch=branch.name) + schema_branch = registry.schema.get_schema_branch(name=branch.name) + full_schema = schema_branch.get_all() return SchemaReadAPI( - nodes=[value.dict() for value in full_schema.values() if isinstance(value, NodeSchema)], - generics=[value.dict() for value in full_schema.values() if isinstance(value, GenericSchema)], + main=registry.schema.get_schema_branch(name=branch.name).get_hash(), + nodes=[APINodeSchema.from_schema(value) for value in full_schema.values() if isinstance(value, NodeSchema)], + generics=[ + APIGenericSchema.from_schema(value) for value in full_schema.values() if isinstance(value, GenericSchema) + ], + namespaces=schema_branch.get_namespaces(), ) +@router.get("/summary") +async def get_schema_summary( + branch: Branch = Depends(get_branch_dep), +) -> SchemaBranchHash: + log.debug("schema_summary_request", branch=branch.name) + schema_branch = registry.schema.get_schema_branch(name=branch.name) + return schema_branch.get_hash_full() + + +@router.get("/{schema_kind}") +async def get_schema_by_kind( + schema_kind: str, + branch: Branch = Depends(get_branch_dep), +) -> Union[APINodeSchema, APIGenericSchema]: + log.debug("schema_kind_request", branch=branch.name) + + schema = registry.schema.get(name=schema_kind, branch=branch) + + if isinstance(schema, GroupSchema): + return JSONResponse(status_code=422, content={"error": "GroupSchema aren't supported via this endpoint"}) + if isinstance(schema, NodeSchema): + return APINodeSchema.from_schema(schema=schema) + if isinstance(schema, GenericSchema): + return APIGenericSchema.from_schema(schema=schema) + + @router.post("/load") async def load_schema( schemas: SchemasLoadAPI, @@ -84,7 +132,7 @@ async def load_schema( branch: Branch = Depends(get_branch_dep), _: str = 
Depends(get_current_user), ) -> JSONResponse: - log.info("load_request", branch=branch.name) + log.info("schema_load_request", branch=branch.name) errors: List[str] = [] for schema in schemas.schemas: @@ -110,6 +158,7 @@ async def load_schema( diff = tmp_schema.diff(branch_schema) if diff.all: + log.info(f"Schema has diff, will need to be updated {diff.all}", branch=branch.name) async with db.start_transaction() as db: await registry.schema.update_schema_branch( schema=tmp_schema, db=db, branch=branch.name, limit=diff.all, update_db=True diff --git a/backend/infrahub/api/storage.py b/backend/infrahub/api/storage.py index 834c14150c..5e29b3d25c 100644 --- a/backend/infrahub/api/storage.py +++ b/backend/infrahub/api/storage.py @@ -22,16 +22,16 @@ class UploadContentPayload(BaseModel): @router.get("/object/{identifier:str}") -async def get_file( +def get_file( identifier: str, _: str = Depends(get_current_user), ) -> Response: - content = await registry.storage.retrieve(identifier=identifier) + content = registry.storage.retrieve(identifier=identifier) return Response(content=content) @router.post("/upload/content") -async def upload_content( +def upload_content( item: UploadContentPayload, _: str = Depends(get_current_user), ) -> UploadResponse: @@ -43,12 +43,12 @@ async def upload_content( identifier = str(UUIDT()) checksum = hashlib.md5(file_content).hexdigest() - await registry.storage.store(identifier=identifier, content=file_content) + registry.storage.store(identifier=identifier, content=file_content) return UploadResponse(identifier=identifier, checksum=checksum) @router.post("/upload/file") -async def upload_file( +def upload_file( file: UploadFile = File(...), _: str = Depends(get_current_user), ) -> UploadResponse: @@ -60,5 +60,5 @@ async def upload_file( identifier = str(UUIDT()) checksum = hashlib.md5(file_content).hexdigest() - await registry.storage.store(identifier=identifier, content=file_content) + registry.storage.store(identifier=identifier, content=file_content) return UploadResponse(identifier=identifier, checksum=checksum) diff --git a/backend/infrahub/api/transformation.py b/backend/infrahub/api/transformation.py index 7a9137f59b..15dd0179c9 100644 --- a/backend/infrahub/api/transformation.py +++ b/backend/infrahub/api/transformation.py @@ -19,7 +19,7 @@ from infrahub.message_bus.responses import TemplateResponse, TransformResponse if TYPE_CHECKING: - from infrahub.message_bus.rpc import InfrahubRpcClient + from infrahub.services import InfrahubServices router = APIRouter() @@ -80,7 +80,7 @@ async def transform_python( return JSONResponse(status_code=500, content={"errors": errors}) - rpc_client: InfrahubRpcClient = request.app.state.rpc_client + service: InfrahubServices = request.app.state.service message = messages.TransformPythonData( repository_id=repository.id, # type: ignore[attr-defined] @@ -91,7 +91,7 @@ async def transform_python( data=result.data, ) - response = await rpc_client.rpc(message=message) + response = await service.message_bus.rpc(message=message) template = response.parse(response_class=TransformResponse) return JSONResponse(content=template.transformed_data) @@ -143,7 +143,7 @@ async def generate_rfile( return JSONResponse(status_code=500, content={"errors": errors}) - rpc_client: InfrahubRpcClient = request.app.state.rpc_client + service: InfrahubServices = request.app.state.service message = messages.TransformJinjaTemplate( repository_id=repository.id, # type: ignore[attr-defined] @@ -154,7 +154,7 @@ async def generate_rfile( data=result.data, 
) - response = await rpc_client.rpc(message=message) + response = await service.message_bus.rpc(message=message) template = response.parse(response_class=TemplateResponse) return PlainTextResponse(content=template.rendered_template) diff --git a/backend/infrahub/checks.py b/backend/infrahub/checks.py index 2043ef53a0..39921d48d6 100644 --- a/backend/infrahub/checks.py +++ b/backend/infrahub/checks.py @@ -1,128 +1,9 @@ -import asyncio -import json -import os -from abc import abstractmethod -from typing import Any, Optional +from warnings import warn -from git.repo import Repo -from infrahub_sdk import InfrahubClient +from infrahub_sdk.checks import INFRAHUB_CHECK_VARIABLE_TO_IMPORT, InfrahubCheck -INFRAHUB_CHECK_VARIABLE_TO_IMPORT = "INFRAHUB_CHECKS" +warn( + f"The module {__name__} is deprecated. Update to use infrahub_sdk.checks instead.", DeprecationWarning, stacklevel=2 +) - -class InfrahubCheck: - name: Optional[str] = None - query: str = "" - timeout: int = 10 - rebase: bool = True - - def __init__(self, branch=None, root_directory=None, output=None): - self.data = None - self.git = None - - self.logs = [] - self.passed = None - - self.output = output - - self.branch = branch - - self.root_directory = root_directory or os.getcwd() - - self.client: InfrahubClient - - if not self.name: - self.name = self.__class__.__name__ - - if not self.query: - raise ValueError("A query must be provided") - - @classmethod - async def init(cls, client: Optional[InfrahubClient] = None, *args, **kwargs): - """Async init method, If an existing InfrahubClient client hasn't been provided, one will be created automatically.""" - - instance = cls(*args, **kwargs) - instance.client = client or InfrahubClient() - - return instance - - @property - def errors(self): - return [log for log in self.logs if log["level"] == "ERROR"] - - def _write_log_entry( - self, message: Any, level: str, object_id: Optional[Any] = None, object_type: Optional[Any] = None - ) -> None: - log_message = {"level": level, "message": message, "branch": self.branch_name} - if object_id: - log_message["object_id"] = object_id - if object_type: - log_message["object_type"] = object_type - self.logs.append(log_message) - - if self.output == "stdout": - print(json.dumps(log_message)) - - def log_error(self, message, object_id=None, object_type=None) -> None: - self._write_log_entry(message=message, level="ERROR", object_id=object_id, object_type=object_type) - - def log_info(self, message, object_id=None, object_type=None) -> None: - self._write_log_entry(message=message, level="INFO", object_id=object_id, object_type=object_type) - - @property - def log_entries(self) -> str: - output = "" - for log in self.logs: - output += "-----------------------\n" - output += f"Message: {log['message']}\n" - output += f"Level: {log['level']}\n" - if "object_id" in log: - output += f"Object ID: {log['object_id']}\n" - if "object_type" in log: - output += f"Object ID: {log['object_type']}\n" - return output - - @property - def branch_name(self) -> str: - """Return the name of the current git branch.""" - - if self.branch: - return self.branch - - if not self.git: - self.git = Repo(self.root_directory) - - self.branch = str(self.git.active_branch) - - return self.branch - - @abstractmethod - def validate(self): - """Code to validate the status of this check.""" - - async def collect_data(self): - """Query the result of the GraphQL Query defined in sef.query and store the result in self.data""" - - data = await self.client.query_gql_query(name=self.query, 
branch_name=self.branch_name, rebase=self.rebase) - self.data = data - - async def run(self) -> bool: - """Execute the check after collecting the data from the GraphQL query. - The result of the check is determined based on the presence or not of ERROR log messages.""" - - await self.collect_data() - - validate_method = getattr(self, "validate") - if asyncio.iscoroutinefunction(validate_method): - await validate_method() - else: - validate_method() - - nbr_errors = len([log for log in self.logs if log["level"] == "ERROR"]) - - self.passed = bool(nbr_errors == 0) - - if self.passed: - self.log_info("Check succesfully completed") - - return self.passed +__all__ = ["INFRAHUB_CHECK_VARIABLE_TO_IMPORT", "InfrahubCheck"] diff --git a/backend/infrahub/cli/__init__.py b/backend/infrahub/cli/__init__.py index 922760c3c8..0c0de3d33b 100644 --- a/backend/infrahub/cli/__init__.py +++ b/backend/infrahub/cli/__init__.py @@ -4,7 +4,6 @@ import infrahub.config as config from infrahub.cli.db import app as db_app -from infrahub.cli.doc import app as doc_app from infrahub.cli.events import app as events_app from infrahub.cli.generate_schema import app as generate_schema_app from infrahub.cli.git_agent import app as git_app @@ -21,7 +20,6 @@ app.add_typer(db_app, name="db") app.add_typer(events_app, name="events", help="Interact with the events system.") app.add_typer(generate_schema_app, name="generate-schema") -app.add_typer(doc_app, name="doc") async def _init_shell(config_file: str) -> None: diff --git a/backend/infrahub/cli/db.py b/backend/infrahub/cli/db.py index 1f4c5dcd6c..09f5bfc930 100644 --- a/backend/infrahub/cli/db.py +++ b/backend/infrahub/cli/db.py @@ -71,7 +71,7 @@ async def _load_test_data(dataset: str) -> None: def init( config_file: str = typer.Option( "infrahub.toml", envvar="INFRAHUB_CONFIG", help="Location of the configuration file to use for Infrahub" - ) + ), ) -> None: """Erase the content of the database and initialize it with the core schema.""" @@ -89,7 +89,7 @@ def load_test_data( ), dataset: str = "dataset01", ) -> None: - """Load test data into the database from the test_data directory.""" + """Load test data into the database from the `test_data` directory.""" logging.getLogger("neo4j").setLevel(logging.ERROR) diff --git a/backend/infrahub/cli/doc.py b/backend/infrahub/cli/doc.py deleted file mode 100644 index cf2207f876..0000000000 --- a/backend/infrahub/cli/doc.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from pathlib import Path - -import jinja2 -import typer - -from infrahub.core.schema import internal_schema - -app = typer.Typer() - - -DOCUMENTATION_DIRECTORY = "../../../docs" - - -@app.command() -def generate_schema() -> None: - """Generate documentation for the schema""" - - schemas_to_generate = ["node", "attribute", "relationship", "generic"] - here = os.path.abspath(os.path.dirname(__file__)) - - for schema_name in schemas_to_generate: - template_file = os.path.join(here, f"{DOCUMENTATION_DIRECTORY}/schema/{schema_name}.j2") - output_file = os.path.join(here, f"{DOCUMENTATION_DIRECTORY}/schema/{schema_name}.md") - if not os.path.exists(template_file): - print(f"Unable to find the template file at {template_file}") - raise typer.Exit(1) - - template_text = Path(template_file).read_text(encoding="utf-8") - - environment = jinja2.Environment() - template = environment.from_string(template_text) - rendered_file = template.render(schema=internal_schema) - - with open(output_file, "w", encoding="utf-8") as f: - f.write(rendered_file) - - print(f"Schema generated for 
{schema_name}") - - print("Schema documentation generated") diff --git a/backend/infrahub/cli/git_agent.py b/backend/infrahub/cli/git_agent.py index c114135a4b..49807aa33a 100644 --- a/backend/infrahub/cli/git_agent.py +++ b/backend/infrahub/cli/git_agent.py @@ -11,6 +11,7 @@ from rich.logging import RichHandler from infrahub import config +from infrahub.components import ComponentType from infrahub.core.initialization import initialization from infrahub.database import InfrahubDatabase, get_db from infrahub.git import initialize_repositories_directory @@ -19,11 +20,9 @@ from infrahub.log import clear_log_context, get_logger from infrahub.message_bus import get_broker, messages from infrahub.message_bus.operations import execute_message -from infrahub.message_bus.worker import WorkerCallback from infrahub.services import InfrahubServices from infrahub.services.adapters.cache.redis import RedisCache from infrahub.services.adapters.message_bus.rabbitmq import RabbitMQMessageBus -from infrahub.worker import WORKER_IDENTITY app = typer.Typer() @@ -45,7 +44,7 @@ def callback() -> None: """ -async def subscribe_rpcs_queue(client: InfrahubClient) -> None: +async def subscribe_rpcs_queue(service: InfrahubServices) -> None: """Subscribe to the RPCs queue and execute the corresponding action when a valid RPC is received.""" # TODO generate an exception if the broker is not properly configured # and return a proper message to the user @@ -53,27 +52,9 @@ async def subscribe_rpcs_queue(client: InfrahubClient) -> None: # Create a channel and subscribe to the incoming RPC queue channel = await connection.channel() - queue = await channel.declare_queue( - f"{config.SETTINGS.broker.namespace}.rpcs", durable=True, arguments={"x-queue-type": "quorum"} - ) - events_queue = await channel.declare_queue(name=f"worker-events-{WORKER_IDENTITY}", exclusive=True) - - exchange = await channel.declare_exchange(f"{config.SETTINGS.broker.namespace}.events", type="topic", durable=True) - await events_queue.bind(exchange, routing_key="refresh.registry.*") - delayed_exchange = await channel.get_exchange(name=f"{config.SETTINGS.broker.namespace}.delayed") - driver = await get_db() - database = InfrahubDatabase(driver=driver) - service = InfrahubServices( - cache=RedisCache(), - client=client, - database=database, - message_bus=RabbitMQMessageBus(channel=channel, exchange=exchange, delayed_exchange=delayed_exchange), - ) - async with service.database.start_session() as db: - await initialization(db=db) + await channel.set_qos(prefetch_count=1) - worker_callback = WorkerCallback(service=service) - await events_queue.consume(worker_callback.run_command, no_ack=True) + queue = await channel.get_queue(f"{config.SETTINGS.broker.namespace}.rpcs") log.info("Waiting for RPC instructions to execute .. ") async with queue.iterator() as qiterator: async for message in qiterator: @@ -93,23 +74,15 @@ async def subscribe_rpcs_queue(client: InfrahubClient) -> None: log.exception("Processing error for message %r" % message) -async def initialize_git_agent(client: InfrahubClient) -> None: +async def initialize_git_agent(service: InfrahubServices) -> None: log.info("Initializing Git Agent ...") initialize_repositories_directory() # TODO Validate access to the GraphQL API with the proper credentials - await sync_remote_repositories(client=client) - + await sync_remote_repositories(service=service) -async def monitor_remote_activity(client: InfrahubClient, interval: int) -> None: - log.info("Monitoring remote repository for updates .. 
") - while True: - await sync_remote_repositories(client=client) - await asyncio.sleep(interval) - - -async def _start(debug: bool, interval: int, port: int) -> None: +async def _start(debug: bool, port: int) -> None: """Start Infrahub Git Agent.""" log_level = "DEBUG" if debug else "INFO" @@ -128,11 +101,23 @@ async def _start(debug: bool, interval: int, port: int) -> None: # Initialize the lock initialize_lock() - await initialize_git_agent(client=client) + driver = await get_db() + database = InfrahubDatabase(driver=driver) + service = InfrahubServices( + cache=RedisCache(), + client=client, + database=database, + message_bus=RabbitMQMessageBus(), + component_type=ComponentType.GIT_AGENT, + ) + await service.initialize() + await initialize_git_agent(service=service) + + async with service.database.start_session() as db: + await initialization(db=db) tasks = [ - asyncio.create_task(subscribe_rpcs_queue(client=client)), - asyncio.create_task(monitor_remote_activity(client=client, interval=interval)), + asyncio.create_task(subscribe_rpcs_queue(service=service)), ] await asyncio.gather(*tasks) @@ -140,7 +125,6 @@ async def _start(debug: bool, interval: int, port: int) -> None: @app.command() def start( - interval: int = typer.Option(10, help="Interval in sec between remote repositories update."), debug: bool = typer.Option(False, help="Enable advanced logging and troubleshooting"), config_file: str = typer.Option( "infrahub.toml", envvar="INFRAHUB_CONFIG", help="Location of the configuration file to use for Infrahub" @@ -159,4 +143,4 @@ def start( config.load_and_exit(config_file_name=config_file) - aiorun(_start(interval=interval, debug=debug, port=port)) + aiorun(_start(debug=debug, port=port)) diff --git a/backend/infrahub/components.py b/backend/infrahub/components.py new file mode 100644 index 0000000000..0a2f61655d --- /dev/null +++ b/backend/infrahub/components.py @@ -0,0 +1,7 @@ +from enum import IntFlag + + +class ComponentType(IntFlag): + NONE = 0 + API_SERVER = 1 + GIT_AGENT = 2 diff --git a/backend/infrahub/config.py b/backend/infrahub/config.py index b91a33c1b0..5a65f24917 100644 --- a/backend/infrahub/config.py +++ b/backend/infrahub/config.py @@ -5,11 +5,15 @@ import sys from enum import Enum from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional import toml from infrahub_sdk import generate_uuid -from pydantic import BaseSettings, Field, ValidationError +from pydantic import BaseModel, BaseSettings, Field, ValidationError + +if TYPE_CHECKING: + from infrahub.services.adapters.cache import InfrahubCache + from infrahub.services.adapters.message_bus import InfrahubMessageBus SETTINGS: Settings = None @@ -23,7 +27,8 @@ class DatabaseType(str, Enum): class StorageDriver(str, Enum): - LOCAL = "local" + FileSystemStorage = "local" + InfrahubS3ObjectStorage = "s3" class TraceExporterType(str, Enum): @@ -54,17 +59,47 @@ class Config: case_sensitive = False -class StorageSettings(BaseSettings): - driver: StorageDriver = StorageDriver.LOCAL - settings: Optional[Dict[str, str]] = Field(default=None) +class FileSystemStorageSettings(BaseSettings): + path_: str = Field(default="/opt/infrahub/storage", alias="path") + + class Config: + fields = {"path_": {"env": "INFRAHUB_STORAGE_LOCAL_PATH"}} + + +class S3StorageSettings(BaseSettings): + access_key_id: str = Field(default="", alias="AWS_ACCESS_KEY_ID") + secret_access_key: str = Field(default="", alias="AWS_SECRET_ACCESS_KEY") + bucket_name: str = 
Field(default="", alias="AWS_S3_BUCKET_NAME") + endpoint_url: str = Field(default="", alias="AWS_S3_ENDPOINT_URL") + use_ssl: bool = Field(default=True, alias="AWS_S3_US_SSL") + default_acl: str = Field(default="", alias="AWS_DEFAULT_ACL") + querystring_auth: bool = Field(default=False, alias="AWS_QUERYSTRING_AUTH") + custom_domain: str = Field(default="", alias="AWS_S3_CUSTOM_DOMAIN") class Config: - case_sensitive = False fields = { - "driver": {"env": "INFRAHUB_STORAGE_DRIVER"}, + "access_key_id": {"env": "AWS_ACCESS_KEY_ID"}, + "secret_access_key": {"env": "AWS_SECRET_ACCESS_KEY"}, + "bucket_name": {"env": "INFRAHUB_STORAGE_BUCKET_NAME"}, + "endpoint_url": {"env": "INFRAHUB_STORAGE_ENDPOINT_URL"}, + "use_ssl": {"env": "INFRAHUB_STORAGE_USE_SSL"}, + "default_acl": {"env": "INFRAHUB_STORAGE_DEFAULT_ACL"}, + "querystring_auth": {"env": "INFRAHUB_STORAGE_QUERYTSTRING_AUTH"}, + "custom_domain": {"env": "INFRAHUB_STORAGE_CUSTOM_DOMAIN"}, } +class StorageSettings(BaseSettings): + driver: StorageDriver = StorageDriver.FileSystemStorage + + local: FileSystemStorageSettings = FileSystemStorageSettings() + s3: S3StorageSettings = S3StorageSettings() + + class Config: + env_prefix = "INFRAHUB_STORAGE" + case_sensitive = False + + class DatabaseSettings(BaseSettings): db_type: DatabaseType = DatabaseType.MEMGRAPH protocol: str = "bolt" @@ -258,6 +293,11 @@ class Config: case_sensitive = False +class Override(BaseModel): + message_bus: Optional[InfrahubMessageBus] = None + cache: Optional[InfrahubCache] = None + + class Settings(BaseSettings): """Main Settings Class for the project.""" @@ -315,3 +355,6 @@ def load_and_exit(config_file_name: str = "infrahub.toml", config_data: Optional for error in err.errors(): print(f" {'/'.join(error['loc'])} | {error['msg']} ({error['type']})") sys.exit(1) + + +OVERRIDE: Override = Override() diff --git a/backend/infrahub/core/attribute.py b/backend/infrahub/core/attribute.py index a99dbf225f..6b779e6ce1 100644 --- a/backend/infrahub/core/attribute.py +++ b/backend/infrahub/core/attribute.py @@ -356,12 +356,12 @@ async def _update(self, db: InfrahubDatabase, at: Optional[Timestamp] = None) -> return True @classmethod - async def get_query_filter( # pylint: disable=unused-argument + async def get_query_filter( # pylint: disable=unused-argument,disable=too-many-branches cls, name: str, filter_name: str, branch: Optional[Branch] = None, - filter_value: Optional[Union[str, int, bool]] = None, + filter_value: Optional[Union[str, int, bool, list]] = None, include_match: bool = True, param_prefix: Optional[str] = None, db: Optional[InfrahubDatabase] = None, @@ -372,9 +372,12 @@ async def get_query_filter( # pylint: disable=unused-argument query_params: Dict[str, Any] = {} query_where: List[str] = [] - if filter_value and not isinstance(filter_value, (str, bool, int)): + if filter_value and not isinstance(filter_value, (str, bool, int, list)): raise TypeError(f"filter {filter_name}: {filter_value} ({type(filter_value)}) is not supported.") + if isinstance(filter_value, list) and not all(isinstance(value, (str, bool, int)) for value in filter_value): + raise TypeError(f"filter {filter_name}: {filter_value} (list) contains unsupported item") + param_prefix = param_prefix or f"attr_{name}" if include_match: @@ -399,6 +402,11 @@ async def get_query_filter( # pylint: disable=unused-argument else: query_filter.append(QueryNode(name="av", labels=["AttributeValue"])) + elif filter_name == "values" and isinstance(filter_value, list): + 
query_filter.extend((QueryRel(labels=["HAS_VALUE"]), QueryNode(name="av", labels=["AttributeValue"]))) + query_where.append(f"av.value IN ${param_prefix}_value") + query_params[f"{param_prefix}_value"] = filter_value + elif filter_name in cls._flag_properties and filter_value is not None: query_filter.append(QueryRel(labels=[filter_name.upper()])) query_filter.append( @@ -532,9 +540,109 @@ class Boolean(BaseAttribute): type = bool +class Dropdown(BaseAttribute): + type = str + + @property + def color(self) -> str: + """Return the color for the current value""" + color = "" + if self.schema.choices: + selected = [choice for choice in self.schema.choices if choice.name == self.value] + if selected: + color = selected[0].color + + return color + + @property + def description(self) -> str: + """Return the description for the current value""" + if self.schema.choices: + selected = [choice for choice in self.schema.choices if choice.name == self.value] + if selected: + return selected[0].description + + return "" + + @property + def label(self) -> str: + """Return the label for the current value""" + label = "" + if self.schema.choices: + selected = [choice for choice in self.schema.choices if choice.name == self.value] + if selected: + label = selected[0].label + + return label + + @classmethod + def validate_content(cls, value: Any, name: str, schema: AttributeSchema) -> None: + """Validate the content of the dropdown.""" + super().validate_content(value=value, name=name, schema=schema) + values = [choice.name for choice in schema.choices] + if value not in values: + raise ValidationError({name: f"{value} must be one of {', '.join(sorted(values))!r}"}) + + class IPNetwork(BaseAttribute): type = str + @property + def broadcast_address(self) -> Optional[str]: + """Return the broadcast address of the ip network.""" + if not self.value: + return None + return str(ipaddress.ip_network(str(self.value)).broadcast_address) + + @property + def hostmask(self) -> Optional[str]: + """Return the hostmask of the ip network.""" + if not self.value: + return None + return str(ipaddress.ip_network(str(self.value)).hostmask) + + @property + def netmask(self) -> Optional[str]: + """Return the netmask of the ip network.""" + if not self.value: + return None + return str(ipaddress.ip_network(str(self.value)).netmask) + + @property + def prefixlen(self) -> Optional[str]: + """Return the prefix length the ip network.""" + if not self.value: + return None + return str(ipaddress.ip_network(str(self.value)).prefixlen) + + @property + def num_addresses(self) -> Optional[int]: + """Return the number of possible addresses in the ip network.""" + if not self.value: + return None + return int(ipaddress.ip_network(str(self.value)).num_addresses) + + @property + def version(self) -> Optional[int]: + """Return the IP version of the ip network.""" + if not self.value: + return None + return int(ipaddress.ip_network(str(self.value)).version) + + @property + def with_hostmask(self) -> Optional[str]: + """Return the network ip and the associated hostmask of the ip network.""" + if not self.value: + return None + return str(ipaddress.ip_network(str(self.value)).with_hostmask) + + @property + def with_netmask(self) -> Optional[str]: + """Return the network ip and the associated netmask of the ip network.""" + if not self.value: + return None + return str(ipaddress.ip_network(str(self.value)).with_netmask) + @classmethod def validate_format(cls, value: Any, name: str, schema: AttributeSchema) -> None: """Validate the format of the 
attribute. @@ -564,6 +672,62 @@ def serialize(cls, value: Any) -> Any: class IPHost(BaseAttribute): type = str + @property + def ip(self) -> Optional[str]: + """Return the ip adress without a prefix or subnet mask.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value)).ip) + + @property + def hostmask(self) -> Optional[str]: + """Return the hostmask of the ip address.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value)).hostmask) + + @property + def netmask(self) -> Optional[str]: + """Return the netmask of the ip address.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value)).netmask) + + @property + def network(self) -> Optional[str]: + """Return the network encapsuling the ip address.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value)).network) + + @property + def prefixlen(self) -> Optional[str]: + """Return the prefix length of the ip address.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value))._prefixlen) + + @property + def version(self) -> Optional[int]: + """Return the IP version of the ip address.""" + if not self.value: + return None + return int(ipaddress.ip_interface(str(self.value)).version) + + @property + def with_hostmask(self) -> Optional[str]: + """Return the ip address and the associated hostmask of the ip address.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value)).with_hostmask) + + @property + def with_netmask(self) -> Optional[str]: + """Return the ip address and the associated netmask of the ip address.""" + if not self.value: + return None + return str(ipaddress.ip_interface(str(self.value)).with_netmask) + @classmethod def validate_format(cls, value: Any, name: str, schema: AttributeSchema) -> None: """Validate the format of the attribute. diff --git a/backend/infrahub/core/branch.py b/backend/infrahub/core/branch.py index f837452bb1..dda685dcab 100644 --- a/backend/infrahub/core/branch.py +++ b/backend/infrahub/core/branch.py @@ -39,9 +39,15 @@ from infrahub.core.registry import get_branch, registry from infrahub.core.timestamp import Timestamp from infrahub.core.utils import add_relationship, update_relationships_to -from infrahub.exceptions import BranchNotFound, ValidationError +from infrahub.exceptions import ( + BranchNotFound, + DiffFromRequiredOnDefaultBranchError, + DiffRangeValidationError, + ValidationError, +) from infrahub.message_bus import messages from infrahub.message_bus.responses import DiffNamesResponse +from infrahub.services import services if TYPE_CHECKING: from infrahub.database import InfrahubDatabase @@ -246,7 +252,9 @@ async def delete(self, db: InfrahubDatabase) -> None: def get_query_filter_relationships( self, rel_labels: list, at: Optional[Union[Timestamp, str]] = None, include_outside_parentheses: bool = False ) -> Tuple[List, Dict]: - """Generate a CYPHER Query filter based on a list of relationships to query a part of the graph at a specific time and on a specific branch.""" + """ + Generate a CYPHER Query filter based on a list of relationships to query a part of the graph at a specific time and on a specific branch. 
+ """ filters = [] params = {} @@ -279,7 +287,8 @@ def get_query_filter_relationships( return filters, params def get_query_filter_path(self, at: Optional[Union[Timestamp, str]] = None) -> Tuple[str, Dict]: - """Generate a CYPHER Query filter based on a path to query a part of the graph at a specific time and on a specific branch. + """ + Generate a CYPHER Query filter based on a path to query a part of the graph at a specific time and on a specific branch. Examples: >>> rels_filter, rels_params = self.branch.get_query_filter_path(at=self.at) @@ -355,7 +364,8 @@ def get_query_filter_relationships_diff( diff_from: Timestamp, diff_to: Timestamp, ) -> Tuple[List, Dict]: - """Generate a CYPHER Query filter to query all events that are applicable to a given branch based + """ + Generate a CYPHER Query filter to query all events that are applicable to a given branch based - The time when the branch as created - The branched_from time of the branch - The diff_to and diff_from time as provided @@ -398,7 +408,8 @@ def get_query_filter_range( start_time: Union[Timestamp, str], end_time: Union[Timestamp, str], ) -> Tuple[List, Dict]: - """Generate a CYPHER Query filter to query a range of values in the graph between start_time and end_time.""" + """ + Generate a CYPHER Query filter to query a range of values in the graph between start_time and end_time.""" filters = [] params = {} @@ -443,7 +454,8 @@ async def rebase(self, db: InfrahubDatabase, at: Optional[Union[str, Timestamp]] registry.branch[self.name] = self async def validate_branch(self, db: InfrahubDatabase) -> List[ObjectConflict]: - """Validate if a branch is eligible to be merged. + """ + Validate if a branch is eligible to be merged. - Must be conflict free both for data and repository - All checks must pass - Check schema changes @@ -1029,7 +1041,9 @@ def __init__( self.branch_support = branch_support or [BranchSupportType.AWARE] if not diff_from and self.branch.is_default: - raise ValueError(f"diff_from is mandatory when diffing on the default branch `{self.branch.name}`.") + raise DiffFromRequiredOnDefaultBranchError( + f"diff_from is mandatory when diffing on the default branch `{self.branch.name}`." + ) # If diff from hasn't been provided, we'll use the creation of the branch as the starting point if diff_from: @@ -1041,7 +1055,7 @@ def __init__( self.diff_to = Timestamp(diff_to) if self.diff_to < self.diff_from: - raise ValueError("diff_to must be later than diff_from") + raise DiffRangeValidationError("diff_to must be later than diff_from") # Results organized by Branch self._results: Dict[str, dict] = defaultdict( @@ -1082,7 +1096,9 @@ async def init( ) async def has_conflict( - self, db: InfrahubDatabase, rpc_client: InfrahubRpcClient # pylint: disable=unused-argument + self, + db: InfrahubDatabase, + rpc_client: InfrahubRpcClient, # pylint: disable=unused-argument ) -> bool: """Return True if the same path has been modified on multiple branches. False otherwise""" @@ -1422,7 +1438,11 @@ async def get_modified_paths_repositories_for_branch( return paths async def get_modified_paths_repository( - self, rpc_client: InfrahubRpcClient, repository, commit_from: str, commit_to: str + self, + rpc_client: InfrahubRpcClient, # pylint: disable=unused-argument + repository, + commit_from: str, + commit_to: str, ) -> Set[Tuple]: """Return the path of all the files that have changed for a given repository between 2 commits. 
@@ -1436,7 +1456,7 @@ async def get_modified_paths_repository( second_commit=commit_to, ) - reply = await rpc_client.rpc(message=message) + reply = await services.service.message_bus.rpc(message=message) diff = reply.parse(response_class=DiffNamesResponse) return {("file", repository.id, filename) for filename in diff.files_changed} @@ -1953,7 +1973,7 @@ async def _calculated_diff_files(self, db: InfrahubDatabase, rpc_client: Infrahu async def get_files_repository( self, - rpc_client: InfrahubRpcClient, + rpc_client: InfrahubRpcClient, # pylint: disable=unused-argument branch_name: str, repository, commit_from: str, @@ -1970,7 +1990,7 @@ async def get_files_repository( second_commit=commit_to, ) - reply = await rpc_client.rpc(message=message) + reply = await services.service.message_bus.rpc(message=message) diff = reply.parse(response_class=DiffNamesResponse) actions = { diff --git a/backend/infrahub/core/constants.py b/backend/infrahub/core/constants.py index 275e391d3f..ac8e61c00c 100644 --- a/backend/infrahub/core/constants.py +++ b/backend/infrahub/core/constants.py @@ -4,7 +4,7 @@ from typing import List from infrahub.exceptions import ValidationError -from infrahub.utils import InfrahubNumberEnum, InfrahubStringEnum +from infrahub.utils import InfrahubStringEnum GLOBAL_BRANCH_NAME = "-global-" @@ -66,19 +66,6 @@ class ContentType(InfrahubStringEnum): TEXT_PLAIN = "text/plain" -class CriticalityLevel(InfrahubNumberEnum): - one = 1 - two = 2 - three = 3 - four = 4 - five = 5 - six = 6 - seven = 7 - eight = 8 - nine = 9 - ten = 1 - - class DiffAction(InfrahubStringEnum): ADDED = "added" REMOVED = "removed" @@ -163,6 +150,12 @@ class RelationshipStatus(InfrahubStringEnum): DELETED = "deleted" +class RelationshipDirection(InfrahubStringEnum): + BIDIR = "bidirectional" + OUTBOUND = "outbound" + INBOUND = "inbound" + + class Severity(InfrahubStringEnum): SUCCESS = "success" INFO = "info" @@ -186,11 +179,12 @@ class ValidatorState(InfrahubStringEnum): RESTRICTED_NAMESPACES: List[str] = [ "Account", "Branch", - "Builtin", + # "Builtin", # "Core", "Deprecated", "Diff", "Infrahub", "Internal", "Lineage", + "Schema", ] diff --git a/backend/infrahub/core/initialization.py b/backend/infrahub/core/initialization.py index 41a4136eb8..0ab7fd0f60 100644 --- a/backend/infrahub/core/initialization.py +++ b/backend/infrahub/core/initialization.py @@ -11,7 +11,7 @@ from infrahub.core.schema_manager import SchemaManager from infrahub.database import InfrahubDatabase from infrahub.exceptions import DatabaseError -from infrahub.storage.local import InfrahubLocalStorage +from infrahub.storage import InfrahubObjectStorage LOGGER = logging.getLogger("infrahub") @@ -42,8 +42,7 @@ async def initialization(db: InfrahubDatabase): # --------------------------------------------------- # Initialize the Storage Driver # --------------------------------------------------- - if config.SETTINGS.storage.driver == config.StorageDriver.LOCAL: - registry.storage = await InfrahubLocalStorage.init(settings=config.SETTINGS.storage.settings) + registry.storage = await InfrahubObjectStorage.init(settings=config.SETTINGS.storage) # --------------------------------------------------- # Load all existing branches into the registry @@ -67,7 +66,8 @@ async def initialization(db: InfrahubDatabase): await registry.schema.load_schema_from_db(db=db, branch=default_branch) if default_branch.update_schema_hash(): LOGGER.warning( - f"{default_branch.name} | New schema detected after pulling the schema from the db : {hash_in_db!r} >> 
{default_branch.schema_hash.main!r}" + f"{default_branch.name} | New schema detected after pulling the schema from the db :" + f" {hash_in_db!r} >> {default_branch.schema_hash.main!r}" ) for branch in branches: @@ -80,7 +80,8 @@ async def initialization(db: InfrahubDatabase): if branch.update_schema_hash(): LOGGER.warning( - f"{branch.name} | New schema detected after pulling the schema from the db {hash_in_db!r} >> {branch.schema_hash.main!r}" + f"{branch.name} | New schema detected after pulling the schema from the db :" + f" {hash_in_db!r} >> {branch.schema_hash.main!r}" ) # --------------------------------------------------- @@ -201,22 +202,6 @@ async def first_time_initialization(db: InfrahubDatabase): # -------------------------------------------------- # Create Default Users and Groups # -------------------------------------------------- - CRITICALITY_LEVELS = ( - # ("negligible", 1), - ("low", 2), - ("medium", 3), - ("high", 4), - # ("very high", 5), - # ("critical", 6), - # ("very critical", 7), - ) - - criticality_schema = registry.get_schema(name="BuiltinCriticality") - for level in CRITICALITY_LEVELS: - obj = await Node.init(db=db, schema=criticality_schema) - await obj.new(db=db, name=level[0], level=level[1]) - await obj.save(db=db) - token_schema = registry.get_schema(name="InternalAccountToken") # admin_grp = await Node.init(db=db, schema=group_schema) # await admin_grp.new(db=db, name="admin") diff --git a/backend/infrahub/core/manager.py b/backend/infrahub/core/manager.py index 2a542d990a..0ec551b5df 100644 --- a/backend/infrahub/core/manager.py +++ b/backend/infrahub/core/manager.py @@ -212,7 +212,7 @@ async def query_peers( ] @classmethod - async def get_one_by_id_or_default_filter( + async def get_one_by_default_filter( cls, db: InfrahubDatabase, id: str, @@ -228,22 +228,6 @@ async def get_one_by_id_or_default_filter( branch = await get_branch(branch=branch, db=db) at = Timestamp(at) - node = await cls.get_one( - id=id, - fields=fields, - at=at, - branch=branch, - include_owner=include_owner, - include_source=include_source, - db=db, - prefetch_relationships=prefetch_relationships, - account=account, - ) - - if node: - return node - - # Check if there is a default_filter defined for this schema node_schema = registry.get_node_schema(name=schema_name, branch=branch) if not node_schema.default_filter: raise NodeNotFound(branch_name=branch.name, node_type=schema_name, identifier=id) @@ -252,7 +236,7 @@ async def get_one_by_id_or_default_filter( db=db, schema=node_schema, fields=fields, - limit=10, + limit=2, filters={node_schema.default_filter: id}, branch=branch, at=at, @@ -261,8 +245,6 @@ async def get_one_by_id_or_default_filter( prefetch_relationships=prefetch_relationships, account=account, ) - if not items: - raise NodeNotFound(branch_name=branch.name, node_type=schema_name, identifier=id) if len(items) > 1: raise NodeNotFound( @@ -272,7 +254,54 @@ async def get_one_by_id_or_default_filter( message=f"Unable to find node {id!r}, {len(items)} nodes returned, expected 1", ) - return items[0] + return items[0] if items else None + + @classmethod + async def get_one_by_id_or_default_filter( + cls, + db: InfrahubDatabase, + id: str, + schema_name: str, + fields: Optional[dict] = None, + at: Union[Timestamp, str] = None, + branch: Union[Branch, str] = None, + include_source: bool = False, + include_owner: bool = False, + prefetch_relationships: bool = False, + account=None, + ) -> Node: + branch = await get_branch(branch=branch, db=db) + at = Timestamp(at) + + node = await 
cls.get_one( + id=id, + fields=fields, + at=at, + branch=branch, + include_owner=include_owner, + include_source=include_source, + db=db, + prefetch_relationships=prefetch_relationships, + account=account, + ) + if node: + return node + + node = await cls.get_one_by_default_filter( + db=db, + id=id, + schema_name=schema_name, + fields=fields, + at=at, + branch=branch, + include_source=include_source, + include_owner=include_owner, + prefetch_relationships=prefetch_relationships, + account=account, + ) + if not node: + raise NodeNotFound(branch_name=branch.name, node_type=schema_name, identifier=id) + return node @classmethod async def get_one( @@ -286,7 +315,8 @@ async def get_one( include_owner: bool = False, prefetch_relationships: bool = False, account=None, - ) -> Node: + kind: Optional[str] = None, + ) -> Optional[Node]: """Return one node based on its ID.""" result = await cls.get_many( ids=[id], @@ -303,7 +333,17 @@ async def get_one( if not result: return None - return result[id] + node = result[id] + + if kind and node.get_kind() != kind: + raise NodeNotFound( + branch_name=branch.name, + node_type=kind, + identifier=id, + message=f"Node with id {id} exists, but it is a {node.get_kind()}, not {kind}", + ) + + return node @classmethod async def get_many( # pylint: disable=too-many-branches @@ -331,7 +371,7 @@ async def get_many( # pylint: disable=too-many-branches # Query list of all Attributes query = await NodeListGetAttributeQuery.init( db=db, - ids=ids, + ids=list(nodes_info_by_id.keys()), fields=fields, branch=branch, include_source=include_source, diff --git a/backend/infrahub/core/query/__init__.py b/backend/infrahub/core/query/__init__.py index 21b318e339..9bac148782 100644 --- a/backend/infrahub/core/query/__init__.py +++ b/backend/infrahub/core/query/__init__.py @@ -52,6 +52,12 @@ class QueryElementType(Enum): RELATIONSHIP = "relationship" +class QueryRelDirection(Enum): + BIDIR = "bidirectional" + INBOUND = "inbound" + OUTBOUND = "outbound" + + @dataclass class QueryElement: type: QueryElementType @@ -97,6 +103,17 @@ class QueryNode(QueryElement): @dataclass class QueryRel(QueryElement): type: QueryElementType = QueryElementType.RELATIONSHIP + direction: QueryRelDirection = QueryRelDirection.BIDIR + + def __str__(self): + main_str = "[%s%s%s]" % (self.name or "", self.labels_as_str, self.params_as_str) + + if self.direction == QueryRelDirection.INBOUND: + return "<-%s-" % main_str + if self.direction == QueryRelDirection.OUTBOUND: + return "-%s->" % main_str + + return "-%s-" % main_str class QueryType(Enum): diff --git a/backend/infrahub/core/query/attribute.py b/backend/infrahub/core/query/attribute.py index aa95f193ff..45b362f352 100644 --- a/backend/infrahub/core/query/attribute.py +++ b/backend/infrahub/core/query/attribute.py @@ -57,9 +57,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): MATCH (a { uuid: $attr_uuid }) MATCH (a)-[r:HAS_VALUE]-(av) WHERE %s - """ % ( - "\n AND ".join(rels_filter), - ) + """ % ("\n AND ".join(rels_filter),) self.add_to_query(query) @@ -140,14 +138,11 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): self.params["flag_value"] = getattr(self.attr, self.flag_name) self.params["flag_type"] = self.attr.get_kind() - query = ( - """ + query = """ MATCH (a { uuid: $attr_uuid }) MERGE (flag:Boolean { value: $flag_value }) CREATE (a)-[r:%s { branch: $branch, branch_level: $branch_level, status: "active", from: $at, to: null }]->(flag) - """ - % self.flag_name.upper() - ) + """ % self.flag_name.upper() 
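        # A hedged reading of the query assembled above: flag_name is one of the
        # attribute flag properties (is_visible / is_protected), so
        # flag_name.upper() becomes the relationship type in the CREATE clause,
        # for example flag_name = "is_protected" yields
        #   CREATE (a)-[r:IS_PROTECTED ...]->(flag)
        # with $flag_value bound to getattr(self.attr, self.flag_name) and
        # $flag_type to self.attr.get_kind(), both set in self.params above.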
self.add_to_query(query) self.return_labels = ["a", "flag", "r"] @@ -207,18 +202,18 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): at = self.at or self.attr.at self.params["at"] = at.to_string() - rels_filter, rel_params = self.branch.get_query_filter_relationships(rel_labels=["r1", "r2"], at=at.to_string()) - self.params.update(rel_params) + rels_filter, rels_params = self.branch.get_query_filter_path(at=at.to_string()) + self.params.update(rels_params) - query = """ - MATCH (n { uuid: $node_uuid }) - MATCH (a { uuid: $attr_uuid }) - MATCH (n)-[r1]-(a)-[r2:HAS_VALUE|IS_VISIBLE|IS_PROTECTED|HAS_SOURCE|HAS_OWNER]-(ap) - WHERE %s - """ % ( - "\n AND ".join(rels_filter), + query = ( + """ + MATCH (a:Attribute { uuid: $attr_uuid }) + MATCH p = ((a)-[r2:HAS_VALUE|IS_VISIBLE|IS_PROTECTED|HAS_SOURCE|HAS_OWNER]->(ap)) + WHERE all(r IN relationships(p) WHERE ( %s)) + """ + % rels_filter ) self.add_to_query(query) - self.return_labels = ["n", "a", "ap", "r1", "r2"] + self.return_labels = ["a", "ap", "r2"] diff --git a/backend/infrahub/core/query/diff.py b/backend/infrahub/core/query/diff.py index a88e2d2af7..f292daff92 100644 --- a/backend/infrahub/core/query/diff.py +++ b/backend/infrahub/core/query/diff.py @@ -220,7 +220,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): query = ( """ CALL { - MATCH p = ((src:Node)-[r1:IS_RELATED]->(rel:Relationship)<-[r2:IS_RELATED]-(dst:Node)) + MATCH p = ((src:Node)-[r1:IS_RELATED]-(rel:Relationship)-[r2:IS_RELATED]-(dst:Node)) WHERE (rel.branch_support IN $branch_support AND %s r1.branch = r2.branch AND (r1.to = r2.to OR (r1.to is NULL AND r2.to is NULL)) AND r1.from = r2.from AND r1.status = r2.status AND all(r IN relationships(p) WHERE (r.branch IN $branch_names AND r.from >= $diff_from AND r.from <= $diff_to @@ -231,7 +231,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): } CALL { WITH rel, branch_name - MATCH p = ((sn:Node)-[r1:IS_RELATED]->(rel:Relationship)<-[r2:IS_RELATED]-(dn:Node)) + MATCH p = ((sn:Node)-[r1:IS_RELATED]-(rel:Relationship)-[r2:IS_RELATED]-(dn:Node)) WHERE (rel.branch_support IN $branch_support AND r1.branch = r2.branch AND (r1.to = r2.to OR (r1.to is NULL AND r2.to is NULL)) AND r1.from = r2.from AND r1.status = r2.status AND all(r IN relationships(p) WHERE (r.branch = branch_name AND r.from >= $diff_from AND r.from <= $diff_to @@ -275,7 +275,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): } CALL { WITH rel - MATCH p = ((sn:Node)-[r1]->(rel)<-[r2]-(dn:Node)) + MATCH p = ((sn:Node)-[r1]-(rel)-[r2]-(dn:Node)) WHERE r1.branch = r2.branch AND (r1.to = r2.to OR (r1.to is NULL AND r2.to is NULL)) AND r1.from = r2.from AND r1.status = r2.status AND all(r IN relationships(p) WHERE ( %s )) RETURN rel as rel1, sn as sn1, dn as dn1, r1 as r11, r2 as r21 @@ -288,9 +288,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): r3.branch IN $branch_names AND r3.from >= $diff_from AND r3.from <= $diff_to AND ((r3.to >= $diff_from AND r3.to <= $diff_to) OR r3.to is NULL) ) - """ % "\n AND ".join( - rels_filter - ) + """ % "\n AND ".join(rels_filter) self.add_to_query(query) self.params["branch_names"] = self.branch_names @@ -332,9 +330,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): MATCH (a) WHERE a.uuid IN $ids MATCH (a)-[r:IS_VISIBLE|IS_PROTECTED|HAS_SOURCE|HAS_OWNER|HAS_VALUE]-(ap) WHERE %s - """ % ( - "\n AND ".join(rels_filter), - ) + """ % ("\n AND ".join(rels_filter),) self.add_to_query(query) self.return_labels = 
["a", "ap", "r"] @@ -386,9 +382,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): MATCH (a) WHERE a.uuid IN $ids MATCH (a)-[r:IS_VISIBLE|IS_PROTECTED|HAS_SOURCE|HAS_OWNER|HAS_VALUE]-(ap) WHERE %s - """ % ( - "\n AND ".join(rels_filter), - ) + """ % ("\n AND ".join(rels_filter),) self.add_to_query(query) self.return_labels = ["a", "ap", "r"] @@ -444,9 +438,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): MATCH (rl) WHERE rl.uuid IN $ids MATCH (rl)-[r:IS_VISIBLE|IS_PROTECTED|HAS_SOURCE|HAS_OWNER]-(rp) WHERE %s - """ % ( - "\n AND ".join(rels_filter), - ) + """ % ("\n AND ".join(rels_filter),) self.params["at"] = self.at.to_string() diff --git a/backend/infrahub/core/query/node.py b/backend/infrahub/core/query/node.py index 1350f0d4ed..a5b58055ba 100644 --- a/backend/infrahub/core/query/node.py +++ b/backend/infrahub/core/query/node.py @@ -4,6 +4,7 @@ from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Tuple +from infrahub.core.constants import RelationshipDirection from infrahub.core.query import Query, QueryResult, QueryType from infrahub.core.query.subquery import build_subquery_filter, build_subquery_order from infrahub.core.query.utils import find_node_schema @@ -121,7 +122,15 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): relationships.append(await rel.get_create_data(db=db)) self.params["attrs"] = [attr.dict() for attr in attributes] - self.params["rels"] = [rel.dict() for rel in relationships] + self.params["rels_bidir"] = [ + rel.dict() for rel in relationships if rel.direction == RelationshipDirection.BIDIR.value + ] + self.params["rels_out"] = [ + rel.dict() for rel in relationships if rel.direction == RelationshipDirection.OUTBOUND.value + ] + self.params["rels_in"] = [ + rel.dict() for rel in relationships if rel.direction == RelationshipDirection.INBOUND.value + ] self.params["node_prop"] = { "uuid": self.node.id, @@ -158,7 +167,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): CREATE (a)-[:HAS_OWNER { branch: attr.branch, branch_level: attr.branch_level, status: attr.status, from: $at, to: null }]->(peer) ) ) - FOREACH ( rel IN $rels | + FOREACH ( rel IN $rels_bidir | MERGE (d:Node { uuid: rel.destination_id }) CREATE (rl:Relationship { uuid: rel.uuid, name: rel.name, branch_support: rel.branch_support }) CREATE (n)-[:IS_RELATED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(rl) @@ -176,11 +185,45 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): CREATE (rl)-[:HAS_OWNER { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(peer) ) ) + FOREACH ( rel IN $rels_out | + MERGE (d:Node { uuid: rel.destination_id }) + CREATE (rl:Relationship { uuid: rel.uuid, name: rel.name, branch_support: rel.branch_support }) + CREATE (n)-[:IS_RELATED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(rl) + CREATE (d)<-[:IS_RELATED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]-(rl) + MERGE (ip:Boolean { value: rel.is_protected }) + MERGE (iv:Boolean { value: rel.is_visible }) + CREATE (rl)-[:IS_PROTECTED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(ip) + CREATE (rl)-[:IS_VISIBLE { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(iv) + FOREACH ( 
prop IN rel.source_prop | + MERGE (peer:Node { uuid: prop.peer_id }) + CREATE (rl)-[:HAS_SOURCE { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(peer) + ) + FOREACH ( prop IN rel.owner_prop | + MERGE (peer:Node { uuid: prop.peer_id }) + CREATE (rl)-[:HAS_OWNER { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(peer) + ) + ) + FOREACH ( rel IN $rels_in | + MERGE (d:Node { uuid: rel.destination_id }) + CREATE (rl:Relationship { uuid: rel.uuid, name: rel.name, branch_support: rel.branch_support }) + CREATE (n)<-[:IS_RELATED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]-(rl) + CREATE (d)-[:IS_RELATED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(rl) + MERGE (ip:Boolean { value: rel.is_protected }) + MERGE (iv:Boolean { value: rel.is_visible }) + CREATE (rl)-[:IS_PROTECTED { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(ip) + CREATE (rl)-[:IS_VISIBLE { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(iv) + FOREACH ( prop IN rel.source_prop | + MERGE (peer:Node { uuid: prop.peer_id }) + CREATE (rl)-[:HAS_SOURCE { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(peer) + ) + FOREACH ( prop IN rel.owner_prop | + MERGE (peer:Node { uuid: prop.peer_id }) + CREATE (rl)-[:HAS_OWNER { branch: rel.branch, branch_level: rel.branch_level, status: rel.status, from: $at, to: null }]->(peer) + ) + ) WITH distinct n MATCH (n)-[:HAS_ATTRIBUTE|IS_RELATED]-(rn)-[:HAS_VALUE|IS_RELATED]-(rv) - """ % ":".join( - self.node.get_labels() - ) + """ % ":".join(self.node.get_labels()) self.params["at"] = at.to_string() @@ -275,14 +318,11 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): self.params.update(rel_params) - query = ( - """ + query = """ MATCH (a:Attribute) WHERE a.uuid IN $attrs_ids MATCH (a)-[r1:HAS_VALUE]-(av:AttributeValue) WHERE %s - """ - % rel_filter[0] - ) + """ % rel_filter[0] self.add_to_query(query) self.return_labels = ["a", "av", "r1"] diff --git a/backend/infrahub/core/query/relationship.py b/backend/infrahub/core/query/relationship.py index 80a642c9e5..acd0095bea 100644 --- a/backend/infrahub/core/query/relationship.py +++ b/backend/infrahub/core/query/relationship.py @@ -184,17 +184,28 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): self.query_add_all_node_property_match() + self.params["rel_prop"] = { + "branch": self.branch.name, + "branch_level": self.branch.hierarchy_level, + "status": "active", + "from": self.at.to_string(), + "to": None, + } + arrows = self.schema.get_query_arrows() + r1 = f"{arrows.left.start}[r1:{self.rel_type} $rel_prop ]{arrows.left.end}" + r2 = f"{arrows.right.start}[r2:{self.rel_type} $rel_prop ]{arrows.right.end}" + query_create = """ CREATE (rl:Relationship { uuid: $uuid, name: $name, branch_support: $branch_support }) - CREATE (s)-[r1:%s { branch: $branch, branch_level: $branch_level, status: "active", from: $at, to: null }]->(rl) - CREATE (d)-[r2:%s { branch: $branch, branch_level: $branch_level, status: "active", from: $at, to: null }]->(rl) + CREATE (s)%s(rl) + CREATE (rl)%s(d) MERGE (ip:Boolean { value: $is_protected }) MERGE (iv:Boolean { value: $is_visible }) - CREATE (rl)-[r3:IS_PROTECTED { branch: $branch, branch_level: $branch_level, status: "active", from: $at, to: null 
}]->(ip) - CREATE (rl)-[r4:IS_VISIBLE { branch: $branch, branch_level: $branch_level, status: "active", from: $at, to: null }]->(iv) + CREATE (rl)-[r3:IS_PROTECTED $rel_prop ]->(ip) + CREATE (rl)-[r4:IS_VISIBLE $rel_prop ]->(iv) """ % ( - self.rel_type, - self.rel_type, + r1, + r2, ) self.add_to_query(query_create) @@ -348,23 +359,34 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): self.params[f"prop_{prop_name}_id"] = element_id_to_id(prop.prop_db_id) self.return_labels.append(f"prop_{prop_name}") + self.params["rel_prop"] = { + "branch": self.branch.name, + "branch_level": self.branch.hierarchy_level, + "status": "deleted", + "from": self.at.to_string(), + "to": None, + } + + arrows = self.schema.get_query_arrows() + r1 = f"{arrows.left.start}[r1:{self.rel_type} $rel_prop ]{arrows.left.end}" + r2 = f"{arrows.right.start}[r2:{self.rel_type} $rel_prop ]{arrows.right.end}" + # ----------------------------------------------------------------------- # Create all the DELETE relationships, including properties # ----------------------------------------------------------------------- query = """ - CREATE (s)-[r1:%s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at, to: null }]->(rl) - CREATE (d)-[r2:%s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at, to: null }]->(rl) + CREATE (s)%s(rl) + CREATE (rl)%s(d) """ % ( - self.rel_type, - self.rel_type, + r1, + r2, ) self.add_to_query(query) self.return_labels.extend(["r1", "r2"]) for prop_name, prop in self.data.properties.items(): self.add_to_query( - 'CREATE (prop_%s)-[rel_prop_%s:%s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at, to: null }]->(rl)' - % (prop_name, prop_name, prop_name.upper()), + "CREATE (prop_%s)-[rel_prop_%s:%s $rel_prop ]->(rl)" % (prop_name, prop_name, prop_name.upper()), ) self.return_labels.append(f"rel_prop_{prop_name}") @@ -390,14 +412,25 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): self.params["rel_id"] = self.rel.id self.params["branch"] = self.branch.name self.params["branch_level"] = self.branch.hierarchy_level + self.params["rel_prop"] = { + "branch": self.branch.name, + "branch_level": self.branch.hierarchy_level, + "status": "deleted", + "from": self.at.to_string(), + "to": None, + } + + arrows = self.schema.get_query_arrows() + r1 = f"{arrows.left.start}[r1:{self.rel_type} $rel_prop ]{arrows.left.end}" + r2 = f"{arrows.right.start}[r2:{self.rel_type} $rel_prop ]{arrows.right.end}" query = """ MATCH (s { uuid: $source_id })-[]-(rl:Relationship {uuid: $rel_id})-[]-(d { uuid: $destination_id }) - CREATE (s)-[r1:%s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at, to: null }]->(rl) - CREATE (d)-[r2:%s { branch: $branch, branch_level: $branch_level, status: "deleted", from: $at, to: null }]->(rl) + CREATE (s)%s(rl) + CREATE (rl)%s(d) """ % ( - self.rel_type, - self.rel_type, + r1, + r2, ) self.params["at"] = self.at.to_string() @@ -431,21 +464,20 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): # pylint: di self.params["source_id"] = self.source_id self.params["rel_identifier"] = self.schema.identifier - query = ( - """ + arrows = self.schema.get_query_arrows() + + query = """ MATCH (rl { name: $rel_identifier }) CALL { WITH rl - MATCH p = (source:Node { uuid: $source_id })-[f0r1:IS_RELATED]-(rl)-[f0r2:IS_RELATED]-(peer:Node) + MATCH p = (source:Node { uuid: $source_id })%s[f0r1:IS_RELATED]%s(rl:Relationship)%s[f0r2:IS_RELATED]%s(peer:Node) 
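            // Hedged annotation: the four format placeholders on the MATCH line above
            // expand to the direction-aware arrow halves returned by get_query_arrows(),
            // e.g. an outbound relationship renders both hops as -[f0r1:IS_RELATED]->.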
WHERE peer.uuid <> $source_id AND all(r IN relationships(p) WHERE (%s)) RETURN peer as peer, rl as rl1, f0r1 as r1, f0r2 as r2 ORDER BY f0r1.branch_level DESC, f0r2.branch_level DESC, f0r2.from DESC, f0r2.from DESC LIMIT 1 } WITH peer, rl1 as rl, r1, r2 - """ - % branch_filter - ) + """ % (arrows.left.start, arrows.left.end, arrows.right.start, arrows.right.end, branch_filter) self.add_to_query(query) where_clause = ['r1.status = "active"', 'r2.status = "active"'] @@ -508,9 +540,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): # pylint: di MATCH (rl)-[rel_is_visible:IS_VISIBLE]-(is_visible) MATCH (rl)-[rel_is_protected:IS_PROTECTED]-(is_protected) WHERE all(r IN [ rel_is_visible, rel_is_protected] WHERE (%s)) - """ % ( - branch_filter, - ) + """ % (branch_filter,) self.add_to_query(query) @@ -628,14 +658,18 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): self.params.update(rels_params) + arrows = self.schema.get_query_arrows() + r1 = f"{arrows.left.start}[r1:{self.rel.rel_type}]{arrows.left.end}" + r2 = f"{arrows.right.start}[r2:{self.rel.rel_type}]{arrows.right.end}" + query = """ MATCH (s { uuid: $source_id }) MATCH (d { uuid: $destination_id }) - MATCH (s)-[r1:%s]->(rl:Relationship { name: $name })<-[r2:%s]-(d) + MATCH (s)%s(rl:Relationship { name: $name })%s(d) WHERE %s """ % ( - self.rel.rel_type, - self.rel.rel_type, + r1, + r2, "\n AND ".join(rels_filter), ) diff --git a/backend/infrahub/core/query/standard_node.py b/backend/infrahub/core/query/standard_node.py index a9510a46ef..f5f866ad36 100644 --- a/backend/infrahub/core/query/standard_node.py +++ b/backend/infrahub/core/query/standard_node.py @@ -42,9 +42,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): query = """ CREATE (n:%s $node_prop) - """ % ( - node_type - ) + """ % (node_type) self.add_to_query(query=query) self.return_labels = ["n"] @@ -64,9 +62,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): query = """ MATCH (n:%s { uuid: $uuid }) SET n = $node_prop - """ % ( - self.node.get_type(), - ) + """ % (self.node.get_type(),) self.add_to_query(query=query) self.return_labels = ["n"] @@ -82,9 +78,7 @@ async def query_init(self, db: InfrahubDatabase, *args, **kwargs): query = """ MATCH (n:%s { uuid: $uuid }) DETACH DELETE (n) - """ % ( - self.node.get_type() - ) + """ % (self.node.get_type()) self.params["uuid"] = str(self.node_id) self.add_to_query(query) diff --git a/backend/infrahub/core/query/subquery.py b/backend/infrahub/core/query/subquery.py index 54ec4fc750..47c1456150 100644 --- a/backend/infrahub/core/query/subquery.py +++ b/backend/infrahub/core/query/subquery.py @@ -60,7 +60,7 @@ async def build_subquery_filter( rel_names.append(rel_name) field_where.append("all(r IN relationships(p) WHERE (%s))" % branch_filter) - filter_str = f"({node_alias})-" + "-".join([str(item) for item in field_filter]) + filter_str = f"({node_alias})" + "".join([str(item) for item in field_filter]) where_str = " AND ".join(field_where) order_str = ", ".join([f"{rel}.branch_level DESC, {rel}.from DESC" for rel in rel_names]) query = f""" @@ -119,7 +119,7 @@ async def build_subquery_order( field_filter[-1].name = "last" field_where.append("all(r IN relationships(p) WHERE (%s))" % branch_filter) - filter_str = f"({node_alias})-" + "-".join([str(item) for item in field_filter]) + filter_str = f"({node_alias})" + "".join([str(item) for item in field_filter]) where_str = " AND ".join(field_where) order_str = ", ".join([f"{rel}.branch_level DESC, 
{rel}.from DESC" for rel in rel_names]) diff --git a/backend/infrahub/core/registry.py b/backend/infrahub/core/registry.py index 8176b2b0a1..157b6ea032 100644 --- a/backend/infrahub/core/registry.py +++ b/backend/infrahub/core/registry.py @@ -27,7 +27,7 @@ from infrahub.database import InfrahubDatabase from infrahub.graphql.mutations import BaseAttributeInput from infrahub.graphql.types import InfrahubObject - from infrahub.storage.main import InfrahubObjectStorage + from infrahub.storage import InfrahubObjectStorage from infrahub.types import InfrahubDataType # pylint: disable=too-many-public-methods @@ -55,7 +55,7 @@ class Registry: @property def schema(self) -> SchemaManager: if not self._schema: - raise InitializationError + raise InitializationError() return self._schema diff --git a/backend/infrahub/core/relationship.py b/backend/infrahub/core/relationship.py index 514f51699d..0b5fe40bde 100644 --- a/backend/infrahub/core/relationship.py +++ b/backend/infrahub/core/relationship.py @@ -25,7 +25,7 @@ ) from infrahub.core.timestamp import Timestamp from infrahub.core.utils import update_relationships_to -from infrahub.exceptions import NodeNotFound, ValidationError +from infrahub.exceptions import Error, NodeNotFound, ValidationError if TYPE_CHECKING: from uuid import UUID @@ -51,6 +51,7 @@ class RelationshipCreateData(BaseModel): branch: str branch_level: int branch_support: str + direction: str status: str is_protected: bool is_visible: bool @@ -340,6 +341,10 @@ async def delete(self, db: InfrahubDatabase, at: Optional[Timestamp] = None): ) await query.execute(db=db) result = query.get_result() + if not result: + raise Error( + f"Unable to find the relationship to delete. id: {self.id}, source: {node.id}, destination: {peer.id}" + ) # when we remove a relationship we need to : # - Update the existing relationship if we are on the same branch @@ -412,6 +417,7 @@ async def get_create_data(self, db: InfrahubDatabase): branch=branch.name, destination_id=peer.id, status="active", + direction=self.schema.direction.value, branch_level=self.branch.hierarchy_level, branch_support=self.schema.branch.value, is_protected=self.is_protected, diff --git a/backend/infrahub/core/schema.py b/backend/infrahub/core/schema.py index 323459d3b5..498342071d 100644 --- a/backend/infrahub/core/schema.py +++ b/backend/infrahub/core/schema.py @@ -4,6 +4,7 @@ import enum import hashlib import keyword +import re from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union from infrahub_sdk.utils import duplicates, intersection @@ -18,16 +19,16 @@ BranchConflictKeep, BranchSupportType, ContentType, - CriticalityLevel, FilterSchemaKind, ProposedChangeState, RelationshipCardinality, + RelationshipDirection, RelationshipKind, Severity, ValidatorConclusion, ValidatorState, ) -from infrahub.core.query import QueryNode, QueryRel +from infrahub.core.query import QueryNode, QueryRel, QueryRelDirection from infrahub.core.relationship import Relationship from infrahub.types import ATTRIBUTE_TYPES @@ -49,7 +50,8 @@ RELATIONSHIPS_MAPPING = {"Relationship": Relationship} NODE_KIND_REGEX = r"^[A-Z][a-zA-Z0-9]+$" -NODE_NAME_REGEX = r"" +NODE_NAME_REGEX = r"^[A-Z][a-zA-Z0-9]+$" +NAME_REGEX = r"^[a-z0-9\_]+$" DEFAULT_NAME_MIN_LENGTH = 2 DEFAULT_NAME_MAX_LENGTH = 32 @@ -58,6 +60,33 @@ DEFAULT_DESCRIPTION_LENGTH = 128 DEFAULT_REL_IDENTIFIER_LENGTH = 128 +HTML_COLOR = re.compile(r"#[0-9a-fA-F]{6}\b") + + +class QueryArrow(BaseModel): + start: str + end: str + + +class QueryArrowInband(QueryArrow): + start: str = "<-" 
+ end: str = "-" + + +class QueryArrowOutband(QueryArrow): + start: str = "-" + end: str = "->" + + +class QueryArrowBidir(QueryArrow): + start: str = "-" + end: str = "-" + + +class QueryArrows(BaseModel): + left: QueryArrow + right: QueryArrow + class BaseSchemaModel(BaseModel): _exclude_from_hash: List[str] = [] @@ -253,11 +282,31 @@ class FilterSchema(BaseSchemaModel): _sort_by: List[str] = ["name"] +class DropdownChoice(BaseSchemaModel): + name: str + description: str = "" + color: str = "" + label: str = "" + + _sort_by: List[str] = ["name"] + + @validator("color") + def kind_options( + cls, + v: str, + ) -> str: + if v == "": + return v + if HTML_COLOR.match(v): + return v.lower() + + raise ValueError("Color must be a valid HTML color code") + + class AttributeSchema(BaseSchemaModel): id: Optional[str] - name: str = Field(regex=NODE_NAME_REGEX, min_length=DEFAULT_NAME_MIN_LENGTH, max_length=DEFAULT_NAME_MAX_LENGTH) + name: str = Field(regex=NAME_REGEX, min_length=DEFAULT_NAME_MIN_LENGTH, max_length=DEFAULT_NAME_MAX_LENGTH) kind: str # AttributeKind - namespace: str = "Attribute" label: Optional[str] description: Optional[str] = Field(max_length=DEFAULT_DESCRIPTION_LENGTH) default_value: Optional[Any] @@ -265,11 +314,15 @@ class AttributeSchema(BaseSchemaModel): regex: Optional[str] max_length: Optional[int] min_length: Optional[int] + read_only: bool = False inherited: bool = False unique: bool = False branch: Optional[BranchSupportType] optional: bool = False order_weight: Optional[int] + choices: Optional[List[DropdownChoice]] = Field( + default=None, description="The available choices if the kind is Dropdown." + ) _sort_by: List[str] = ["name"] @@ -282,20 +335,35 @@ def kind_options( raise ValueError(f"Only valid Attribute Kind are : {ATTRIBUTE_KIND_LABELS} ") return v + @root_validator + def validate_dropdown_choices(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Validate that choices are defined for a dropdown but not for other kinds.""" + if values.get("kind") != "Dropdown" and values.get("choices"): + raise ValueError(f"Can only specify 'choices' for kind=Dropdown: {values['kind'] }") + + if values.get("kind") == "Dropdown" and not values.get("choices"): + raise ValueError("The property 'choices' is required for kind=Dropdown") + + return values + def get_class(self): return ATTRIBUTE_TYPES[self.kind].get_infrahub_class() async def get_query_filter( - self, db: InfrahubDatabase, *args, **kwargs # pylint: disable=unused-argument + self, + db: InfrahubDatabase, + *args, + **kwargs, # pylint: disable=unused-argument ) -> Tuple[List[QueryElement], Dict[str, Any], List[str]]: return await self.get_class().get_query_filter(*args, **kwargs) class RelationshipSchema(BaseSchemaModel): id: Optional[str] - name: str = Field(regex=NODE_NAME_REGEX, min_length=DEFAULT_NAME_MIN_LENGTH, max_length=DEFAULT_NAME_MAX_LENGTH) + name: str = Field(regex=NAME_REGEX, min_length=DEFAULT_NAME_MIN_LENGTH, max_length=DEFAULT_NAME_MAX_LENGTH) peer: str = Field(regex=NODE_KIND_REGEX, min_length=DEFAULT_KIND_MIN_LENGTH, max_length=DEFAULT_KIND_MAX_LENGTH) kind: RelationshipKind = RelationshipKind.GENERIC + direction: RelationshipDirection = RelationshipDirection.BIDIR label: Optional[str] description: Optional[str] = Field(max_length=DEFAULT_DESCRIPTION_LENGTH) identifier: Optional[str] = Field(max_length=DEFAULT_REL_IDENTIFIER_LENGTH) @@ -315,6 +383,16 @@ def get_class(self): async def get_peer_schema(self, branch: Optional[Union[Branch, str]] = None): return registry.schema.get(name=self.peer, 
branch=branch) + def get_query_arrows(self) -> QueryArrows: + """Return (in 4 parts) the 2 arrows for the relationship R1 and R2 based on the direction of the relationship.""" + + if self.direction == RelationshipDirection.OUTBOUND: + return QueryArrows(left=QueryArrowOutband(), right=QueryArrowOutband()) + if self.direction == RelationshipDirection.INBOUND: + return QueryArrows(left=QueryArrowInband(), right=QueryArrowInband()) + + return QueryArrows(left=QueryArrowOutband(), right=QueryArrowInband()) + async def get_query_filter( self, db: InfrahubDatabase, @@ -341,12 +419,28 @@ async def get_query_filter( if include_match: query_filter.append(QueryNode(name="n")) + # Determine in which direction the relationships should point based on the side of the query + rels_direction = { + "r1": QueryRelDirection.OUTBOUND, + "r2": QueryRelDirection.INBOUND, + } + if self.direction == RelationshipDirection.OUTBOUND: + rels_direction = { + "r1": QueryRelDirection.OUTBOUND, + "r2": QueryRelDirection.OUTBOUND, + } + if self.direction == RelationshipDirection.INBOUND: + rels_direction = { + "r1": QueryRelDirection.INBOUND, + "r2": QueryRelDirection.INBOUND, + } + if filter_name == "id": query_filter.extend( [ - QueryRel(name="r1", labels=[rel_type]), + QueryRel(name="r1", labels=[rel_type], direction=rels_direction["r1"]), QueryNode(name="rl", labels=["Relationship"], params={"name": f"${prefix}_rel_name"}), - QueryRel(name="r2", labels=[rel_type]), + QueryRel(name="r2", labels=[rel_type], direction=rels_direction["r2"]), QueryNode(name="peer", labels=["Node"]), ] ) @@ -360,9 +454,9 @@ async def get_query_filter( if filter_name == "ids": query_filter.extend( [ - QueryRel(name="r1", labels=[rel_type]), + QueryRel(name="r1", labels=[rel_type], direction=rels_direction["r1"]), QueryNode(name="rl", labels=["Relationship"], params={"name": f"${prefix}_rel_name"}), - QueryRel(name="r2", labels=[rel_type]), + QueryRel(name="r2", labels=[rel_type], direction=rels_direction["r2"]), QueryNode(name="peer", labels=["Node"]), ] ) @@ -386,9 +480,9 @@ async def get_query_filter( query_filter.extend( [ - QueryRel(name="r1", labels=[rel_type]), + QueryRel(name="r1", labels=[rel_type], direction=rels_direction["r1"]), QueryNode(name="rl", labels=["Relationship"], params={"name": f"${prefix}_rel_name"}), - QueryRel(name="r2", labels=[rel_type]), + QueryRel(name="r2", labels=[rel_type], direction=rels_direction["r2"]), QueryNode(name="peer", labels=["Node"]), ] ) @@ -432,6 +526,7 @@ class BaseNodeSchema(BaseSchemaModel): include_in_menu: Optional[bool] = Field(default=None) menu_placement: Optional[str] = Field(default=None) icon: Optional[str] = Field(default=None) + label: Optional[str] _exclude_from_hash: List[str] = ["attributes", "relationships"] _sort_by: List[str] = ["name"] @@ -442,6 +537,10 @@ def kind(self) -> str: return self.name return self.namespace + self.name + @property + def menu_title(self) -> str: + return self.label or self.name + def __hash__(self): """Return a hash of the object. 
Be careful hash generated from hash() have a salt by default and they will not be the same across run""" @@ -572,47 +671,13 @@ def name_is_not_keyword(cls, value: str) -> str: class GenericSchema(BaseNodeSchema): """A Generic can be either an Interface or a Union depending if there are some Attributes or Relationships defined.""" - label: Optional[str] used_by: List[str] = Field(default_factory=list) class NodeSchema(BaseNodeSchema): - label: Optional[str] inherit_from: List[str] = Field(default_factory=list) groups: Optional[List[str]] = Field(default_factory=list) - @root_validator - def unique_names(cls, values): - attr_names = [attr.name for attr in values.get("attributes", [])] - rel_names = [rel.name for rel in values.get("relationships", [])] - - if names_dup := duplicates(attr_names + rel_names): - raise ValueError(f"Names of attributes and relationships must be unique : {names_dup}") - return values - - @root_validator(pre=True) - def generate_identifier( - cls, - values, - ): - for rel in values.get("relationships", []): - if not rel.get("identifier", None) and values.get("namespace") and rel.get("peer"): - identifier = "__".join(sorted([f'{values.get("namespace")}{values.get("name")}', rel.get("peer")])) - rel["identifier"] = identifier.lower() - - return values - - @root_validator(pre=False) - def unique_identifiers( - cls, - values, - ): - identifiers = [rel.identifier for rel in values.get("relationships", [])] - if identifier_dup := duplicates(identifiers): - raise ValueError(f"Identifier of relationships must be unique : {identifier_dup}") - - return values - def inherit_from_interface(self, interface: GenericSchema) -> NodeSchema: existing_inherited_attributes = {item.name: idx for idx, item in enumerate(self.attributes) if item.inherited} existing_inherited_relationships = { @@ -643,7 +708,7 @@ def inherit_from_interface(self, interface: GenericSchema) -> NodeSchema: class GroupSchema(BaseSchemaModel): id: Optional[str] - name: str = Field(regex=NODE_NAME_REGEX, min_length=DEFAULT_NAME_MIN_LENGTH, max_length=DEFAULT_NAME_MAX_LENGTH) + name: str = Field(regex=NAME_REGEX, min_length=DEFAULT_NAME_MIN_LENGTH, max_length=DEFAULT_NAME_MAX_LENGTH) kind: str = Field(regex=NODE_KIND_REGEX, min_length=DEFAULT_KIND_MIN_LENGTH, max_length=DEFAULT_KIND_MAX_LENGTH) description: Optional[str] = Field(max_length=DEFAULT_DESCRIPTION_LENGTH) @@ -668,7 +733,7 @@ class SchemaExtension(BaseSchemaModel): class SchemaRoot(BaseModel): - version: Optional[str] + version: Optional[str] = Field(default=None) generics: List[GenericSchema] = Field(default_factory=list) nodes: List[NodeSchema] = Field(default_factory=list) groups: List[GroupSchema] = Field(default_factory=list) @@ -700,6 +765,7 @@ def validate_namespaces(self) -> List[str]: # TODO need to investigate how we could generate the internal schema # directly from the Pydantic Models to avoid the duplication of effort internal_schema = { + "version": None, "nodes": [ { "name": "Node", @@ -711,7 +777,7 @@ def validate_namespaces(self) -> List[str]: { "name": "name", "kind": "Text", - "description": "Node name, must be unique and must be all lowercase.", + "description": "Node name, must be unique within a namespace and must start with an uppercase letter.", "unique": True, "regex": str(NODE_NAME_REGEX), "min_length": DEFAULT_NAME_MIN_LENGTH, @@ -720,6 +786,7 @@ def validate_namespaces(self) -> List[str]: { "name": "namespace", "kind": "Text", + "description": "Node Namespace, Namespaces are used to organize models into logical groups and to 
prevent name collisions.", "regex": str(NODE_KIND_REGEX), "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, @@ -734,13 +801,14 @@ def validate_namespaces(self) -> List[str]: { "name": "description", "kind": "Text", - # "description": "", + "description": "Short description of the model, will be visible in the frontend.", "optional": True, "max_length": DEFAULT_DESCRIPTION_LENGTH, }, { "name": "branch", "kind": "Text", + "description": "Type of branch support for the model.", "enum": BranchSupportType.available_types(), "default_value": BranchSupportType.AWARE.value, "optional": True, @@ -748,6 +816,7 @@ def validate_namespaces(self) -> List[str]: { "name": "default_filter", "kind": "Text", + "regex": str(NAME_REGEX), "description": "Default filter used to search for a node in addition to its ID.", "optional": True, }, @@ -791,7 +860,7 @@ def validate_namespaces(self) -> List[str]: { "name": "groups", "kind": "List", - "description": "List of Group that this node is part of", + "description": "List of Group that this Node is part of.", "optional": True, }, ], @@ -800,6 +869,7 @@ def validate_namespaces(self) -> List[str]: "name": "attributes", "peer": "SchemaAttribute", "kind": "Component", + "description": "List of supported Attributes for the Node.", "identifier": "schema__node__attributes", "cardinality": "many", "branch": BranchSupportType.AWARE.value, @@ -809,6 +879,7 @@ def validate_namespaces(self) -> List[str]: "name": "relationships", "peer": "SchemaRelationship", "kind": "Component", + "description": "List of supported Relationships for the Node.", "identifier": "schema__node__relationships", "cardinality": "many", "branch": BranchSupportType.AWARE.value, @@ -825,47 +896,112 @@ def validate_namespaces(self) -> List[str]: "attributes": [ { "name": "name", + "description": "Attribute name, must be unique within a model and must be all lowercase.", "kind": "Text", - "regex": str(NODE_NAME_REGEX), + "regex": str(NAME_REGEX), "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, }, { - "name": "namespace", + "name": "kind", "kind": "Text", - "regex": str(NODE_KIND_REGEX), + "description": "Defines the type of the attribute.", + "enum": ATTRIBUTE_KIND_LABELS, "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, + }, + { + "name": "enum", + "kind": "List", + "description": "Define a list of valid values for the attribute.", "optional": True, }, { - "name": "kind", + "name": "choices", + "kind": "List", + "description": "Define a list of valid choices for a dropdown attribute.", + "optional": True, + }, + { + "name": "regex", "kind": "Text", - "enum": ATTRIBUTE_KIND_LABELS, - "min_length": DEFAULT_KIND_MIN_LENGTH, - "max_length": DEFAULT_KIND_MAX_LENGTH, + "description": "Regex used to limit the characters allowed for the attribute.", + "optional": True, + }, + { + "name": "max_length", + "kind": "Number", + "description": "Set a maximum number of characters allowed for a given attribute.", + "optional": True, + }, + { + "name": "min_length", + "kind": "Number", + "description": "Set a minimum number of characters allowed for a given attribute.", + "optional": True, + }, + { + "name": "label", + "kind": "Text", + "optional": True, + "description": "Human friendly representation of the name. 
Will be autogenerated if not provided", + "max_length": DEFAULT_NAME_MAX_LENGTH, + }, + { + "name": "description", + "kind": "Text", + "optional": True, + "description": "Short description of the attribute.", + "max_length": DEFAULT_DESCRIPTION_LENGTH, + }, + { + "name": "read_only", + "kind": "Boolean", + "description": "Set the attribute as Read-Only, users won't be able to change its value. " + "Mainly relevant for internal objects.", + "default_value": False, + "optional": True, + }, + { + "name": "unique", + "kind": "Boolean", + "description": "Indicate if the value of this attribute must be unique in the database for a given model.", + "default_value": False, + "optional": True, + }, + { + "name": "optional", + "kind": "Boolean", + "description": "Indicate if this attribute is mandatory or optional.", + "default_value": True, + "optional": True, }, - {"name": "enum", "kind": "List", "optional": True}, - {"name": "regex", "kind": "Text", "optional": True}, - {"name": "max_length", "kind": "Number", "optional": True}, - {"name": "min_length", "kind": "Number", "optional": True}, - {"name": "label", "kind": "Text", "optional": True, "max_length": DEFAULT_NAME_MAX_LENGTH}, - {"name": "description", "kind": "Text", "optional": True, "max_length": DEFAULT_DESCRIPTION_LENGTH}, - {"name": "unique", "kind": "Boolean", "default_value": False, "optional": True}, - {"name": "optional", "kind": "Boolean", "default_value": True, "optional": True}, { "name": "branch", "kind": "Text", + "description": "Type of branch support for the attribute, if not defined it will be inherited from the node.", "enum": BranchSupportType.available_types(), "optional": True, }, - {"name": "order_weight", "kind": "Number", "optional": True}, + { + "name": "order_weight", + "kind": "Number", + "description": "Number used to order the attribute in the frontend (table and view).", + "optional": True, + }, { "name": "default_value", "kind": "Any", + "description": "Default value of the attribute.", + "optional": True, + }, + { + "name": "inherited", + "kind": "Boolean", + "default_value": False, + "description": "Internal value to indicate if the attribute was inherited from a Generic node.", + "optional": True, }, - {"name": "inherited", "kind": "Boolean", "default_value": False, "optional": True}, ], "relationships": [ { @@ -889,13 +1025,15 @@ def validate_namespaces(self) -> List[str]: { "name": "name", "kind": "Text", - "regex": str(NODE_NAME_REGEX), + "description": "Relationship name, must be unique within a model and must be all lowercase.", + "regex": str(NAME_REGEX), "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, }, { "name": "peer", "kind": "Text", + "description": "Type (kind) of objects supported on the other end of the relationship.", "regex": str(NODE_KIND_REGEX), "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, @@ -903,32 +1041,77 @@ def validate_namespaces(self) -> List[str]: { "name": "kind", "kind": "Text", + "description": "Defines the type of the relationship.", "enum": RelationshipKind.available_types(), - "default_value": "Generic", + "default_value": RelationshipKind.GENERIC.value, + }, + { + "name": "label", + "kind": "Text", + "description": "Human friendly representation of the name. 
Will be autogenerated if not provided", + "optional": True, + "max_length": DEFAULT_NAME_MAX_LENGTH, + }, + { + "name": "description", + "kind": "Text", + "optional": True, + "description": "Short description of the relationship.", + "max_length": DEFAULT_DESCRIPTION_LENGTH, + }, + { + "name": "identifier", + "kind": "Text", + "description": "Unique identifier of the relationship within a model," + " identifiers must match to traverse a relationship in both directions.", + "regex": str(NAME_REGEX), + "max_length": DEFAULT_REL_IDENTIFIER_LENGTH, + "optional": True, + }, + { + "name": "cardinality", + "kind": "Text", + "description": "Defines how many objects are expected on the other side of the relationship.", + "enum": RelationshipCardinality.available_types(), + "default_value": RelationshipCardinality.MANY.value, + "optional": True, + }, + { + "name": "order_weight", + "kind": "Number", + "description": "Number used to order the relationship in the frontend (table and view).", + "optional": True, }, - {"name": "label", "kind": "Text", "optional": True, "max_length": DEFAULT_NAME_MAX_LENGTH}, - {"name": "description", "kind": "Text", "optional": True, "max_length": DEFAULT_DESCRIPTION_LENGTH}, - {"name": "identifier", "kind": "Text", "max_length": DEFAULT_REL_IDENTIFIER_LENGTH, "optional": True}, - {"name": "cardinality", "kind": "Text", "enum": RelationshipCardinality.available_types()}, - {"name": "order_weight", "kind": "Number", "optional": True}, { "name": "optional", "kind": "Boolean", + "description": "Indicate if this relationship is mandatory or optional.", "default_value": False, "optional": True, }, { "name": "branch", "kind": "Text", + "description": "Type of branch support for the relationship, if not defined it will be determined based on both peers.", "enum": BranchSupportType.available_types(), "optional": True, }, { "name": "inherited", "kind": "Boolean", + "description": "Internal value to indicate if the relationship was inherited from a Generic node.", "default_value": False, "optional": True, }, + { + "name": "direction", + "kind": "Text", + "description": "Defines the direction of the relationship, " + " Unidirectional relationships are required when the same model is on both sides.", + "enum": RelationshipDirection.available_types(), + "default_value": RelationshipDirection.BIDIR.value, + "optional": True, + }, ], "relationships": [ { @@ -952,6 +1135,7 @@ def validate_namespaces(self) -> List[str]: { "name": "name", "kind": "Text", + "description": "Generic name, must be unique within a namespace and must start with an uppercase letter.", "unique": True, "regex": str(NODE_NAME_REGEX), "min_length": DEFAULT_NAME_MIN_LENGTH, @@ -960,6 +1144,7 @@ def validate_namespaces(self) -> List[str]: { "name": "namespace", "kind": "Text", + "description": "Generic Namespace, Namespaces are used to organize models into logical groups and to prevent name collisions.", "regex": str(NODE_KIND_REGEX), "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, @@ -967,12 +1152,14 @@ def validate_namespaces(self) -> List[str]: { "name": "label", "kind": "Text", + "description": "Human friendly representation of the name/kind", "optional": True, "max_length": 32, }, { "name": "branch", "kind": "Text", + "description": "Type of branch support for the model.", "enum": BranchSupportType.available_types(), "default_value": BranchSupportType.AWARE.value, "optional": True, @@ -981,6 +1168,7 @@ def validate_namespaces(self) -> List[str]: "name": "default_filter", "kind": "Text", 
"description": "Default filter used to search for a node in addition to its ID.", + "regex": str(NAME_REGEX), "optional": True, }, { @@ -1000,7 +1188,7 @@ def validate_namespaces(self) -> List[str]: "kind": "Boolean", "description": "Defines if objects of this kind should be included in the menu.", "default_value": True, - "optional": False, + "optional": True, }, { "name": "menu_placement", @@ -1014,7 +1202,13 @@ def validate_namespaces(self) -> List[str]: "description": "Defines the icon to be used for this object type.", "optional": True, }, - {"name": "description", "kind": "Text", "optional": True, "max_length": DEFAULT_DESCRIPTION_LENGTH}, + { + "name": "description", + "kind": "Text", + "optional": True, + "description": "Short description of the Generic.", + "max_length": DEFAULT_DESCRIPTION_LENGTH, + }, { "name": "used_by", "kind": "List", @@ -1052,7 +1246,6 @@ def validate_namespaces(self) -> List[str]: "name": "name", "kind": "Text", "unique": True, - "regex": str(NODE_NAME_REGEX), "min_length": DEFAULT_NAME_MIN_LENGTH, "max_length": DEFAULT_NAME_MAX_LENGTH, }, @@ -1063,10 +1256,16 @@ def validate_namespaces(self) -> List[str]: "min_length": DEFAULT_KIND_MIN_LENGTH, "max_length": DEFAULT_KIND_MAX_LENGTH, }, - {"name": "description", "kind": "Text", "optional": True, "max_length": DEFAULT_DESCRIPTION_LENGTH}, + { + "name": "description", + "kind": "Text", + "description": "Short description of the Group.", + "optional": True, + "max_length": DEFAULT_DESCRIPTION_LENGTH, + }, ], }, - ] + ], } core_models = { @@ -1352,30 +1551,13 @@ def validate_namespaces(self) -> List[str]: "description": "Group of nodes of any kind.", "include_in_menu": True, "icon": "mdi:account-group", - "label": "StandardGroup", + "label": "Standard Group", "default_filter": "name__value", "order_by": ["name__value"], "display_labels": ["name__value"], "branch": BranchSupportType.AWARE.value, "inherit_from": ["CoreGroup"], }, - { - "name": "Criticality", - "namespace": "Builtin", - "description": "Level of criticality expressed from 1 to 10.", - "include_in_menu": True, - "icon": "mdi:alert-octagon-outline", - "label": "Criticality", - "default_filter": "name__value", - "order_by": ["name__value"], - "display_labels": ["name__value"], - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - {"name": "level", "kind": "Number", "enum": CriticalityLevel.available_types()}, - {"name": "description", "kind": "Text", "optional": True}, - ], - }, { "name": "Tag", "namespace": "Builtin", @@ -1392,26 +1574,6 @@ def validate_namespaces(self) -> List[str]: {"name": "description", "kind": "Text", "optional": True}, ], }, - { - "name": "Organization", - "namespace": "Core", - "description": "An organization represent a legal entity, a company.", - "include_in_menu": True, - "label": "Organization", - "icon": "mdi:domain", - "default_filter": "name__value", - "order_by": ["name__value"], - "display_labels": ["label__value"], - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - {"name": "label", "kind": "Text", "optional": True}, - {"name": "description", "kind": "Text", "optional": True}, - ], - "relationships": [ - {"name": "tags", "peer": "BuiltinTag", "kind": "Attribute", "optional": True, "cardinality": "many"}, - ], - }, { "name": "Account", "namespace": "Core", @@ -1666,59 +1828,6 @@ def validate_namespaces(self) -> List[str]: }, ], }, - { - "name": "Status", - "namespace": "Builtin", - 
"description": "Represent the status of an object: active, maintenance", - "include_in_menu": True, - "icon": "mdi:list-status", - "label": "Status", - "default_filter": "name__value", - "order_by": ["name__value"], - "display_labels": ["label__value"], - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - {"name": "label", "kind": "Text", "optional": True}, - {"name": "description", "kind": "Text", "optional": True}, - ], - }, - { - "name": "Role", - "namespace": "Builtin", - "description": "Represent the role of an object", - "include_in_menu": True, - "icon": "mdi:ballot", - "label": "Role", - "default_filter": "name__value", - "order_by": ["name__value"], - "display_labels": ["label__value"], - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - {"name": "label", "kind": "Text", "optional": True}, - {"name": "description", "kind": "Text", "optional": True}, - ], - }, - { - "name": "Location", - "namespace": "Builtin", - "description": "A location represent a physical element: a building, a site, a city", - "include_in_menu": True, - "icon": "mdi:map-marker-radius-outline", - "label": "Location", - "default_filter": "name__value", - "order_by": ["name__value"], - "display_labels": ["name__value"], - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - {"name": "description", "kind": "Text", "optional": True}, - {"name": "type", "kind": "Text"}, - ], - "relationships": [ - {"name": "tags", "peer": "BuiltinTag", "kind": "Attribute", "optional": True, "cardinality": "many"}, - ], - }, { "name": "Repository", "namespace": "Core", @@ -1885,6 +1994,34 @@ def validate_namespaces(self) -> List[str]: }, ], }, + { + "name": "UserValidator", + "namespace": "Core", + "description": "A Validator related to a user defined checks in a repository", + "include_in_menu": False, + "label": "User Validator", + "display_labels": ["label__value"], + "inherit_from": ["CoreValidator"], + "branch": BranchSupportType.AGNOSTIC.value, + "relationships": [ + { + "name": "check_definition", + "peer": "CoreCheckDefinition", + "kind": "Attribute", + "optional": False, + "cardinality": "one", + "branch": BranchSupportType.AGNOSTIC.value, + }, + { + "name": "repository", + "peer": "CoreRepository", + "kind": "Attribute", + "optional": False, + "cardinality": "one", + "branch": BranchSupportType.AGNOSTIC.value, + }, + ], + }, { "name": "SchemaValidator", "namespace": "Core", @@ -2009,12 +2146,14 @@ def validate_namespaces(self) -> List[str]: "name": "depth", "kind": "Number", "description": "number of nested levels in the query", + "read_only": True, "optional": True, }, { "name": "height", "kind": "Number", "description": "total number of fields requested in the query", + "read_only": True, "optional": True, }, ], diff --git a/backend/infrahub/core/schema_manager.py b/backend/infrahub/core/schema_manager.py index c2bde953ca..524028fd3d 100644 --- a/backend/infrahub/core/schema_manager.py +++ b/backend/infrahub/core/schema_manager.py @@ -5,7 +5,7 @@ from collections import defaultdict from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union -from infrahub_sdk.utils import intersection +from infrahub_sdk.utils import duplicates, intersection from pydantic import BaseModel, Field import infrahub.config as config @@ -13,9 +13,11 @@ from infrahub.core import get_branch, get_branch_from_registry from infrahub.core.constants import ( RESERVED_ATTR_REL_NAMES, + 
diff --git a/backend/infrahub/core/schema_manager.py b/backend/infrahub/core/schema_manager.py index c2bde953ca..524028fd3d 100644 --- a/backend/infrahub/core/schema_manager.py +++ b/backend/infrahub/core/schema_manager.py @@ -5,7 +5,7 @@ from collections import defaultdict from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union -from infrahub_sdk.utils import intersection +from infrahub_sdk.utils import duplicates, intersection from pydantic import BaseModel, Field import infrahub.config as config @@ -13,9 +13,11 @@ from infrahub.core import get_branch, get_branch_from_registry from infrahub.core.constants import ( RESERVED_ATTR_REL_NAMES, + RESTRICTED_NAMESPACES, BranchSupportType, FilterSchemaKind, RelationshipCardinality, + RelationshipDirection, RelationshipKind, ) from infrahub.core.manager import NodeManager @@ -35,6 +37,8 @@ from infrahub.exceptions import SchemaNotFound from infrahub.graphql import generate_graphql_schema from infrahub.log import get_logger +from infrahub.utils import format_label +from infrahub.visuals import select_color log = get_logger() @@ -45,7 +49,7 @@ from infrahub.core.branch import Branch from infrahub.database import InfrahubDatabase -# pylint: disable=redefined-builtin,too-many-public-methods +# pylint: disable=redefined-builtin,too-many-public-methods,too-many-lines INTERNAL_SCHEMA_NODE_KINDS = [node["namespace"] + node["name"] for node in internal_schema["nodes"]] SUPPORTED_SCHEMA_NODE_TYPE = [ @@ -62,6 +66,7 @@ "Integer": FilterSchemaKind.NUMBER, "Boolean": FilterSchemaKind.BOOLEAN, "Checkbox": FilterSchemaKind.BOOLEAN, + "Dropdown": FilterSchemaKind.TEXT, } @@ -75,6 +80,11 @@ def all(self) -> List[str]: return self.changed + self.added + self.removed +class SchemaNamespace(BaseModel): + name: str + user_editable: bool + + class SchemaBranch: def __init__(self, cache: Dict, name: Optional[str] = None, data: Optional[Dict[str, int]] = None): self._cache: Dict[str, Union[NodeSchema, GenericSchema, GroupSchema]] = cache @@ -194,6 +204,20 @@ def get_all(self, include_internal: bool = False) -> Dict[str, Union[NodeSchema, if include_internal or name not in INTERNAL_SCHEMA_NODE_KINDS } + def get_namespaces(self, include_internal: bool = False) -> List[SchemaNamespace]: + all_schemas = self.get_all(include_internal=include_internal) + namespaces: Dict[str, SchemaNamespace] = {} + for schema in all_schemas.values(): + if isinstance(schema, GroupSchema): + continue + if schema.namespace in namespaces: + continue + namespaces[schema.namespace] = SchemaNamespace( + name=schema.namespace, user_editable=schema.namespace not in RESTRICTED_NAMESPACES + ) + + return list(namespaces.values()) + def load_schema(self, schema: SchemaRoot) -> None: """Load a SchemaRoot object and store all NodeSchema, GenericSchema or GroupSchema. 
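For reference, get_namespaces() above reduces to deduplicating namespaces and flagging the restricted ones. A short sketch with an illustrative RESTRICTED_NAMESPACES list (the real constant lives in infrahub.core.constants and is not shown in this patch):

from typing import Dict, List

from pydantic import BaseModel

RESTRICTED_NAMESPACES: List[str] = ["Core", "Internal"]  # illustrative stand-in, not the real constant


class SchemaNamespace(BaseModel):
    name: str
    user_editable: bool


def collect_namespaces(schema_namespaces: List[str]) -> List[SchemaNamespace]:
    """Deduplicate namespaces and flag the restricted ones, mirroring get_namespaces()."""
    found: Dict[str, SchemaNamespace] = {}
    for name in schema_namespaces:
        if name not in found:
            found[name] = SchemaNamespace(name=name, user_editable=name not in RESTRICTED_NAMESPACES)
    return list(found.values())


print(collect_namespaces(["Core", "Infra", "Infra"]))  # Core -> user_editable=False, Infra -> user_editable=True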
@@ -214,16 +238,28 @@ def load_schema(self, schema: SchemaRoot) -> None: self.set(name=node_extension.kind, schema=new_item) def process(self) -> None: + self.process_pre_validation() + self.process_validate() + self.process_post_validation() + + def process_pre_validation(self) -> None: self.generate_identifiers() - self.validate_names() - self.validate_menu_placements() self.process_default_values() self.process_inheritance() self.process_branch_support() + + def process_validate(self) -> None: + self.validate_names() + self.validate_menu_placements() + self.validate_kinds() + self.validate_identifiers() + + def process_post_validation(self) -> None: self.add_groups() self.process_filters() self.generate_weight() self.process_labels() + self.process_dropdowns() def generate_identifiers(self) -> None: """Generate the identifier for all relationships if it's not already present.""" @@ -238,10 +274,69 @@ def generate_identifiers(self) -> None: self.set(name=name, schema=node) + def validate_identifiers(self) -> None: + """Validate that all relationships have a unique identifier for a given model.""" + # Organize all the relationships per identifier and node + rels_per_identifier: Dict[str, Dict[str, List[RelationshipSchema]]] = defaultdict(lambda: defaultdict(list)) + for name in list(self.nodes.keys()) + list(self.generics.keys()): + node = self.get(name=name) + + for rel in node.relationships: + rels_per_identifier[rel.identifier][name].append(rel) + + valid_options = [ + [RelationshipDirection.BIDIR, RelationshipDirection.BIDIR], + sorted([RelationshipDirection.INBOUND, RelationshipDirection.OUTBOUND]), + ] + + for identifier, rels_per_kind in rels_per_identifier.items(): + # Per node kind, check if the directions are good + for _, rels in rels_per_kind.items(): + directions = sorted([rel.direction.value for rel in rels]) + if not (len(rels) == 1 or (len(rels) == 2 and directions == ["inbound", "outbound"])): + names_directions = [(rel.name, rel.direction.value) for rel in rels] + raise ValueError( + f"{node.kind}: Identifier of relationships must be unique for a given direction > {identifier!r} : {names_directions}" + ) from None + + # Continue if no other model is using this identifier + if len(rels_per_kind) == 1: + continue + + # If this node has 2 relationships, BIDIRECTIONAL is not a valid option on the remote node + if len(rels) == 2: + for rel in rels: + if ( + rel.peer in list(rels_per_kind.keys()) + and len(rels_per_kind[rel.peer]) == 1 + and rels_per_kind[rel.peer][0].direction == RelationshipDirection.BIDIR + ): + raise ValueError( + f"{node.kind}: Incompatible direction detected on Reverse Relationship for {rel.name!r} ({identifier!r}) " + f" > {RelationshipDirection.BIDIR.value} " + ) from None + + elif ( + len(rels) == 1 + and rels[0].peer in list(rels_per_kind.keys()) + and len(rels_per_kind[rels[0].peer]) == 1 + ): + peer_direction = rels_per_kind[rels[0].peer][0].direction + if sorted([peer_direction, rels[0].direction]) not in valid_options: + raise ValueError( + f"{node.kind}: Incompatible direction detected on Reverse Relationship for {rels[0].name!r} ({identifier!r})" + f" {rels[0].direction.value} <> {peer_direction.value}" + ) from None + def validate_names(self) -> None: for name in list(self.nodes.keys()) + list(self.generics.keys()): node = self.get(name=name) + if names_dup := duplicates(node.attribute_names + node.relationship_names): + raise ValueError( + f"{node.kind}: Names of attributes and relationships must be unique : {names_dup}" + ) from None + if 
node.kind in INTERNAL_SCHEMA_NODE_KINDS: continue @@ -261,6 +356,55 @@ def validate_menu_placements(self) -> None: except SchemaNotFound: raise ValueError(f"{node.kind}: {node.menu_placement} is not a valid menu placement") from None + def validate_kinds(self) -> None: + for name in list(self.nodes.keys()): + node = self.get(name=name) + + for generic_kind in node.inherit_from: + if self.has(name=generic_kind): + if not isinstance(self.get(name=generic_kind), GenericSchema): + raise ValueError( + f"{node.kind}: Only generic models can be used as part of inherit_from, {generic_kind!r} is not a valid entry." + ) from None + else: + raise ValueError( + f"{node.kind}: {generic_kind!r} is not a valid Generic to inherit from" + ) from None + + for rel in node.relationships: + if rel.peer in ["CoreGroup"]: + continue + if not self.has(rel.peer): + raise ValueError( + f"{node.kind}: Relationship {rel.name!r} is referencing an invalid peer {rel.peer!r}" + ) from None + + def process_dropdowns(self) -> None: + for name in list(self.nodes.keys()) + list(self.generics.keys()): + node = self.get(name=name) + + changed = False + + attributes = [attr for attr in node.attributes if attr.kind == "Dropdown"] + for attr in attributes: + if not attr.choices: + continue + + sorted_choices = sorted(attr.choices or [], key=lambda x: x.name, reverse=True) + defined_colors = [choice.color for choice in sorted_choices if choice.color] + for choice in sorted_choices: + if not choice.color: + choice.color = select_color(defined_colors) + if not choice.label: + choice.label = format_label(choice.name) + + if attr.choices != sorted_choices: + attr.choices = sorted_choices + changed = True + + if changed: + self.set(name=name, schema=node) + def process_labels(self) -> None: for name in list(self.nodes.keys()) + list(self.generics.keys()): node = self.get(name=name) @@ -268,17 +412,17 @@ changed = False if not node.label: - node.label = " ".join([word.title() for word in node.name.split("_")]) + node.label = format_label(node.name) changed = True for attr in node.attributes: if not attr.label: - attr.label = " ".join([word.title() for word in attr.name.split("_")]) + attr.label = format_label(attr.name) changed = True for rel in node.relationships: if not rel.label: - rel.label = " ".join([word.title() for word in rel.name.split("_")]) + rel.label = format_label(rel.name) changed = True if changed: @@ -451,6 +595,7 @@ def generate_filters( filter.enum = attr.enum filters.append(filter) + filters.append(FilterSchema(name=f"{attr.name}__values", kind=FilterSchemaKind.LIST)) for flag_prop in FlagPropertyMixin._flag_properties: filters.append(FilterSchema(name=f"{attr.name}__{flag_prop}", kind=FilterSchemaKind.BOOLEAN)) @@ -485,6 +630,7 @@ def generate_filters( filter.enum = attr.enum filters.append(filter) + filters.append(FilterSchema(name=f"{rel.name}__{attr.name}__values", kind=FilterSchemaKind.LIST)) for flag_prop in FlagPropertyMixin._flag_properties: filters.append( @@ -593,7 +739,7 @@ async def update_schema_branch( self._branches[branch.name] = updated_schema - def register_schema(self, schema: SchemaRoot, branch: str = None) -> SchemaBranch: + def register_schema(self, schema: SchemaRoot, branch: Optional[str] = None) -> SchemaBranch: """Register all nodes, generics & groups from a SchemaRoot object into the registry.""" branch = branch or config.SETTINGS.main.default_branch
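process_labels() and process_dropdowns() above now delegate label generation to infrahub.utils.format_label. Judging from the inline code it replaces (the removed `" ".join([word.title() ...])` lines), format_label presumably behaves like this sketch:

def format_label(name: str) -> str:
    """Snake_case schema name -> human friendly label, matching the removed inline code."""
    return " ".join(word.title() for word in name.split("_"))


assert format_label("device_type") == "Device Type"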
diff --git a/backend/infrahub/core/utils.py b/backend/infrahub/core/utils.py index d97f7b3d1a..5948d9466e 100644 --- a/backend/infrahub/core/utils.py +++ b/backend/infrahub/core/utils.py @@ -21,16 +21,13 @@ async def add_relationship( at: Optional[Timestamp] = None, status=RelationshipStatus.ACTIVE, ): - create_rel_query = ( - """ + create_rel_query = """ MATCH (s) WHERE ID(s) = $src_node_id MATCH (d) WHERE ID(d) = $dst_node_id WITH s,d CREATE (s)-[r:%s { branch: $branch, branch_level: $branch_level, from: $at, to: null, status: $status }]->(d) RETURN ID(r) - """ - % str(rel_type).upper() - ) + """ % str(rel_type).upper() at = Timestamp(at) diff --git a/backend/infrahub/exceptions.py b/backend/infrahub/exceptions.py index 4d90ccf4a2..12a359d406 100644 --- a/backend/infrahub/exceptions.py +++ b/backend/infrahub/exceptions.py @@ -156,6 +156,8 @@ def __init__(self, message: Optional[str] = None): class SchemaNotFound(Error): + HTTP_CODE: int = 422 + def __init__(self, branch_name, identifier, message=None): self.branch_name = branch_name self.identifier = identifier @@ -185,6 +187,13 @@ def __str__(self): """ +class QueryValidationError(Error): + HTTP_CODE = 400 + + def __init__(self, message: str): + self.message = message + + class ValidationError(Error): def __init__(self, input_value): self.message: Optional[str] = None @@ -215,3 +224,18 @@ def __str__(self): return ", ".join([f"{message} at {location}" for location, message in self.messages.items()]) return f"{self.message} at {self.location or ''}" + + +class DiffError(Error): + HTTP_CODE = 400 + + def __init__(self, message: str): + self.message = message + + +class DiffRangeValidationError(DiffError): + ... + + +class DiffFromRequiredOnDefaultBranchError(DiffError): + ... diff --git a/backend/infrahub/git/__init__.py b/backend/infrahub/git/__init__.py index a5f7825be6..f701575655 100644 --- a/backend/infrahub/git/__init__.py +++ b/backend/infrahub/git/__init__.py @@ -7,7 +7,6 @@ GraphQLQueryInformation, InfrahubRepository, RepoFileInformation, - RFileInformation, TransformPythonInformation, Worktree, extract_repo_file_information, @@ -20,7 +19,6 @@ "TEMPORARY_DIRECTORY_NAME", "ArtifactGenerateResult", "InfrahubRepository", - "RFileInformation", "TransformPythonInformation", "CheckDefinitionInformation", "RepoFileInformation", diff --git a/backend/infrahub/git/actions.py b/backend/infrahub/git/actions.py index 8751d28859..35aa3fcc00 100644 --- a/backend/infrahub/git/actions.py +++ b/backend/infrahub/git/actions.py @@ -1,25 +1,28 @@ import logging -from infrahub_sdk import InfrahubClient - from infrahub import lock from infrahub.exceptions import RepositoryError +from infrahub.services import InfrahubServices from .repository import InfrahubRepository LOGGER = logging.getLogger("infrahub.git") -async def sync_remote_repositories(client: InfrahubClient) -> None: - branches = await client.branch.all() - repositories = await client.get_list_repositories(branches=branches) +async def sync_remote_repositories(service: InfrahubServices) -> None: + branches = await service.client.branch.all() + repositories = await service.client.get_list_repositories(branches=branches) for repo_name, repository in repositories.items(): async with lock.registry.get(name=repo_name, namespace="repository"): init_failed = False try: repo = await InfrahubRepository.init( - id=repository.id, name=repository.name, location=repository.location, client=client + service=service, + id=repository.id, + name=repository.name, + location=repository.location, + client=service.client, ) except RepositoryError as exc: LOGGER.error(exc) @@ -28,7 +31,11 @@ async def 
sync_remote_repositories(client: InfrahubClient) -> None: if init_failed: try: repo = await InfrahubRepository.new( - id=repository.id, name=repository.name, location=repository.location, client=client + service=service, + id=repository.id, + name=repository.name, + location=repository.location, + client=service.client, ) await repo.import_objects_from_files(branch_name=repo.default_branch_name) except RepositoryError as exc: diff --git a/backend/infrahub/git/repository.py b/backend/infrahub/git/repository.py index e2662a2551..4f8839a99c 100644 --- a/backend/infrahub/git/repository.py +++ b/backend/infrahub/git/repository.py @@ -24,13 +24,16 @@ InfrahubRepositoryConfig, ValidationError, ) +from infrahub_sdk.schema import ( + InfrahubCheckDefinitionConfig, + InfrahubPythonTransformConfig, + InfrahubRepositoryRFileConfig, +) from infrahub_sdk.utils import YamlFile, compare_lists -from pydantic import BaseModel +from pydantic import BaseModel, Field, validator from pydantic import ValidationError as PydanticValidationError -from pydantic import validator import infrahub.config as config -from infrahub.checks import INFRAHUB_CHECK_VARIABLE_TO_IMPORT, InfrahubCheck from infrahub.exceptions import ( CheckError, CommitNotFoundError, @@ -40,10 +43,12 @@ TransformError, ) from infrahub.log import get_logger -from infrahub.transforms import INFRAHUB_TRANSFORM_VARIABLE_TO_IMPORT +from infrahub.services import InfrahubServices if TYPE_CHECKING: from infrahub_sdk.branch import BranchData + from infrahub_sdk.checks import InfrahubCheck + from infrahub_sdk.schema import InfrahubRepositoryArtifactDefinitionConfig from infrahub.message_bus import messages # pylint: disable=too-few-public-methods,too-many-lines @@ -93,34 +98,6 @@ class GraphQLQueryInformation(BaseModel): """Query in string format""" -class RFileInformation(BaseModel): - name: str - """Name of the RFile""" - - description: Optional[str] - """Description of the RFile""" - - query: str - """ID or name of the GraphQL Query associated with this RFile""" - - repository: str = "self" - """ID of the associated repository or self""" - - template_path: str - """Path to the template file within the repo""" - - -class ArtifactDefinitionInformation(BaseModel): - name: str - """Name of the Artifact Definition""" - - artifact_name: Optional[str] - parameters: dict - content_type: str - targets: str - transformation: str - - class CheckDefinitionInformation(BaseModel): name: str """Name of the check""" @@ -149,6 +126,12 @@ class CheckDefinitionInformation(BaseModel): parameters: Optional[dict] = None """Additional Parameters to extract from each target (if targets is provided)""" + targets: Optional[str] = Field(default=None, description="Targets if not a global check") + + +class InfrahubRepositoryRFile(InfrahubRepositoryRFileConfig): + repository: str + class TransformPythonInformation(BaseModel): name: str @@ -220,7 +203,8 @@ def extract_repo_file_information( Args: full_filename (str): Absolute path to the file to load Example:/opt/infrahub/git/repo01/commits/71da[..]4b7/myfile.py root_directory: Absolute path to the root of the repository directory. Example:/opt/infrahub/git/repo01 - worktree_directory (str, optional): Absolute path to the root of the worktree directory. Defaults to None. example: /opt/infrahub/git/repo01/commits/71da[..]4b7/ + worktree_directory (str, optional): Absolute path to the root of the worktree directory. Defaults to None. 
+ Example: /opt/infrahub/git/repo01/commits/71da[..]4b7/ Returns: RepoFileInformation: Pydantic object to store all information about this file @@ -340,6 +324,7 @@ class InfrahubRepository(BaseModel): # pylint: disable=too-many-public-methods client: Optional[InfrahubClient] cache_repo: Optional[Repo] + service: InfrahubServices class Config: arbitrary_types_allowed = True @@ -513,15 +498,17 @@ async def create_locally(self) -> bool: return True @classmethod - async def new(cls, **kwargs): - self = cls(**kwargs) + async def new(cls, service: Optional[InfrahubServices] = None, **kwargs): + service = service or InfrahubServices() + self = cls(service=service, **kwargs) await self.create_locally() LOGGER.info(f"{self.name} | Created the new project locally.") return self @classmethod - async def init(cls, **kwargs): - self = cls(**kwargs) + async def init(cls, service: Optional[InfrahubServices] = None, **kwargs): + service = service or InfrahubServices() + self = cls(service=service, **kwargs) self.validate_local_directories() LOGGER.debug(f"{self.name} | Initiated the object on an existing directory.") return self @@ -1018,12 +1005,19 @@ async def import_objects_from_files(self, branch_name: str, commit: Optional[str if not commit: commit = self.get_commit_value(branch_name=branch_name) - await self.import_schema_files(branch_name=branch_name, commit=commit) + config_file = await self.get_repository_config(branch_name=branch_name, commit=commit) + + if config_file: + await self.import_schema_files(branch_name=branch_name, commit=commit, config_file=config_file) + await self.import_all_graphql_query(branch_name=branch_name, commit=commit) - await self.import_all_python_files(branch_name=branch_name, commit=commit) - await self.import_all_yaml_files(branch_name=branch_name, commit=commit) - async def import_objects_rfiles(self, branch_name: str, commit: str, data: List[dict]): + if config_file: + await self.import_all_python_files(branch_name=branch_name, commit=commit, config_file=config_file) + await self.import_rfiles(branch_name=branch_name, commit=commit, config_file=config_file) + await self.import_artifact_definitions(branch_name=branch_name, commit=commit, config_file=config_file) + + async def import_rfiles(self, branch_name: str, commit: str, config_file: InfrahubRepositoryConfig): LOGGER.debug(f"{self.name} | Importing all RFiles in branch {branch_name} ({commit}) ") schema = await self.client.schema.get(kind="CoreRFile", branch=branch_name) @@ -1033,13 +1027,14 @@ async def import_objects_rfiles(self, branch_name: str, commit: str, data: List[ for rfile in await self.client.filters(kind="CoreRFile", branch=branch_name, repository__ids=[str(self.id)]) } - local_rfiles = {} + local_rfiles: Dict[str, InfrahubRepositoryRFile] = {} # Process the list of local RFile to organize them by name - for rfile in data: + for config_rfile in config_file.rfiles: try: - item = RFileInformation(**rfile) - self.client.schema.validate_data_against_schema(schema=schema, data=rfile) + self.client.schema.validate_data_against_schema( + schema=schema, data=config_rfile.dict(exclude_none=True) + ) except PydanticValidationError as exc: for error in exc.errors(): LOGGER.error(f" {'/'.join(error['loc'])} | {error['msg']} ({error['type']})") @@ -1048,17 +1043,15 @@ async def import_objects_rfiles(self, branch_name: str, commit: str, data: List[ LOGGER.error(exc.message) continue - # Insert the ID of the current repository if required - if item.repository == "self": - item.repository = self.id + rfile = 
InfrahubRepositoryRFile(repository=str(self.id), **config_rfile.dict()) # Query the GraphQL query and (eventually) replace the name with the ID graphql_query = await self.client.get( - kind="CoreGraphQLQuery", branch=branch_name, id=str(item.query), populate_store=True + kind="CoreGraphQLQuery", branch=branch_name, id=str(rfile.query), populate_store=True ) - item.query = graphql_query.id + rfile.query = graphql_query.id - local_rfiles[item.name] = item + local_rfiles[rfile.name] = rfile present_in_both, only_graph, only_local = compare_lists( list1=list(rfiles_in_graph.keys()), list2=list(local_rfiles.keys()) @@ -1083,17 +1076,17 @@ async def import_objects_rfiles(self, branch_name: str, commit: str, data: List[ LOGGER.info(f"{self.name} | RFile '{rfile_name}' not found locally in branch {branch_name}, deleting") await rfiles_in_graph[rfile_name].delete() - async def create_rfile(self, branch_name: str, data: RFileInformation) -> InfrahubNode: + async def create_rfile(self, branch_name: str, data: InfrahubRepositoryRFile) -> InfrahubNode: schema = await self.client.schema.get(kind="CoreRFile", branch=branch_name) create_payload = self.client.schema.generate_payload_create( - schema=schema, data=data.dict(), source=self.id, is_protected=True + schema=schema, data=data.payload, source=self.id, is_protected=True ) obj = await self.client.create(kind="CoreRFile", branch=branch_name, **create_payload) await obj.save() return obj @classmethod - async def compare_rfile(cls, existing_rfile: InfrahubNode, local_rfile: RFileInformation) -> bool: + async def compare_rfile(cls, existing_rfile: InfrahubNode, local_rfile: InfrahubRepositoryRFile) -> bool: # pylint: disable=no-member if ( existing_rfile.description.value != local_rfile.description @@ -1104,7 +1097,7 @@ async def compare_rfile(cls, existing_rfile: InfrahubNode, local_rfile: RFileInf return True - async def update_rfile(self, existing_rfile: InfrahubNode, local_rfile: RFileInformation) -> None: + async def update_rfile(self, existing_rfile: InfrahubNode, local_rfile: InfrahubRepositoryRFile) -> None: # pylint: disable=no-member if existing_rfile.description.value != local_rfile.description: existing_rfile.description.value = local_rfile.description @@ -1112,12 +1105,12 @@ async def update_rfile(self, existing_rfile: InfrahubNode, local_rfile: RFileInf if existing_rfile.query.id != local_rfile.query: existing_rfile.query = {"id": local_rfile.query, "source": str(self.id), "is_protected": True} - if existing_rfile.template_path.value != local_rfile.template_path: - existing_rfile.template_path.value = local_rfile.template_path + if existing_rfile.template_path.value != local_rfile.template_path_value: + existing_rfile.template_path.value = local_rfile.template_path_value await existing_rfile.save() - async def import_objects_artifact_definitions(self, branch_name: str, commit: str, data: List[dict]): + async def import_artifact_definitions(self, branch_name: str, commit: str, config_file: InfrahubRepositoryConfig): LOGGER.debug(f"{self.name} | Importing all Artifact Definitions in branch {branch_name} ({commit}) ") schema = await self.client.schema.get(kind="CoreArtifactDefinition", branch=branch_name) @@ -1127,13 +1120,12 @@ async def import_objects_artifact_definitions(self, branch_name: str, commit: st for artdef in await self.client.filters(kind="CoreArtifactDefinition", branch=branch_name) } - local_artifact_defs = {} + local_artifact_defs: Dict[str, InfrahubRepositoryArtifactDefinitionConfig] = {} # Process the list of local RFile to 
organize them by name - for artdef in data: + for artdef in config_file.artifact_definitions: try: - item = ArtifactDefinitionInformation(**artdef) - self.client.schema.validate_data_against_schema(schema=schema, data=artdef) + self.client.schema.validate_data_against_schema(schema=schema, data=artdef.dict(exclude_none=True)) except PydanticValidationError as exc: for error in exc.errors(): LOGGER.error(f" {'/'.join(error['loc'])} | {error['msg']} ({error['type']})") @@ -1142,7 +1134,7 @@ async def import_objects_artifact_definitions(self, branch_name: str, commit: st LOGGER.error(exc.message) continue - local_artifact_defs[item.name] = item + local_artifact_defs[artdef.name] = artdef present_in_both, _, only_local = compare_lists( list1=list(artifact_defs_in_graph.keys()), list2=list(local_artifact_defs.keys()) @@ -1167,7 +1159,9 @@ async def import_objects_artifact_definitions(self, branch_name: str, commit: st local_artifact_definition=local_artifact_defs[artdef_name], ) - async def create_artifact_definition(self, branch_name: str, data: ArtifactDefinitionInformation) -> InfrahubNode: + async def create_artifact_definition( + self, branch_name: str, data: InfrahubRepositoryArtifactDefinitionConfig + ) -> InfrahubNode: schema = await self.client.schema.get(kind="CoreArtifactDefinition", branch=branch_name) create_payload = self.client.schema.generate_payload_create( schema=schema, data=data.dict(), source=self.id, is_protected=True @@ -1178,7 +1172,9 @@ async def create_artifact_definition(self, branch_name: str, data: ArtifactDefin @classmethod async def compare_artifact_definition( - cls, existing_artifact_definition: InfrahubNode, local_artifact_definition: RFileInformation + cls, + existing_artifact_definition: InfrahubNode, + local_artifact_definition: InfrahubRepositoryArtifactDefinitionConfig, ) -> bool: # pylint: disable=no-member if ( @@ -1188,8 +1184,12 @@ async def compare_artifact_definition( ): return False + return True + async def update_artifact_definition( - self, existing_artifact_definition: InfrahubNode, local_artifact_definition: RFileInformation + self, + existing_artifact_definition: InfrahubNode, + local_artifact_definition: InfrahubRepositoryArtifactDefinitionConfig, ) -> None: # pylint: disable=no-member if existing_artifact_definition.artifact_name.value != local_artifact_definition.artifact_name: @@ -1229,14 +1229,10 @@ async def get_repository_config(self, branch_name: str, commit: str) -> Optional ) return - async def import_schema_files(self, branch_name: str, commit: str) -> None: + async def import_schema_files(self, branch_name: str, commit: str, config_file: InfrahubRepositoryConfig) -> None: # pylint: disable=too-many-branches - config_file = await self.get_repository_config(branch_name=branch_name, commit=commit) branch_wt = self.get_worktree(identifier=commit or branch_name) - if not config_file: - return - schemas_data: List[YamlFile] = [] for schema in config_file.schemas: @@ -1366,44 +1362,52 @@ async def create_graphql_query(self, branch_name: str, name: str, query_string: await obj.save() return obj - async def import_python_check_definitions_from_module( - self, branch_name: str, commit: str, module: types.ModuleType, file_path: str + async def import_python_check_definitions( + self, branch_name: str, commit: str, config_file: InfrahubRepositoryConfig ) -> None: - if INFRAHUB_CHECK_VARIABLE_TO_IMPORT not in dir(module): - return False + commit_wt = self.get_worktree(identifier=commit) + branch_wt = self.get_worktree(identifier=commit or 
branch_name) - checks_definition_in_graph = { - check.name.value: check - for check in await self.client.filters( - kind="CoreCheckDefinition", branch=branch_name, repository__ids=[str(self.id)] - ) - } + # Ensure the path for this repository is present in sys.path + if self.directory_root not in sys.path: + sys.path.append(self.directory_root) - local_check_definitions = {} - for check_class in getattr(module, INFRAHUB_CHECK_VARIABLE_TO_IMPORT): - graphql_query = await self.client.get( - kind="CoreGraphQLQuery", branch=branch_name, id=str(check_class.query), populate_store=True + checks = [] + for check in config_file.check_definitions: + LOGGER.debug(self.name, import_type="check_definition", file=check.file_path) + + file_info = extract_repo_file_information( + full_filename=os.path.join(branch_wt.directory, check.file_path.as_posix()), + repo_directory=self.directory_root, + worktree_directory=commit_wt.directory, ) try: - item = CheckDefinitionInformation( - name=check_class.__name__, - repository=str(self.id), - class_name=check_class.__name__, - check_class=check_class, - file_path=file_path, - query=str(graphql_query.id), - timeout=check_class.timeout, - rebase=check_class.rebase, - ) - local_check_definitions[item.name] = item - except Exception as exc: # pylint: disable=broad-exception-caught - LOGGER.error( - f"{self.name} | An error occured while processing the CheckDefinition {check_class.__name__} from {file_path} : {exc} " + module = importlib.import_module(file_info.module_name) + except ModuleNotFoundError as exc: + LOGGER.warning( + self.name, import_type="check_definition", file=check.file_path.as_posix(), error=str(exc) ) continue + checks.extend( + await self.get_check_definition( + branch_name=branch_name, + module=module, + file_path=file_info.relative_path_file, + check_definition=check, + ) + ) + + local_check_definitions = {check.name: check for check in checks} + check_definition_in_graph = { + check.name.value: check + for check in await self.client.filters( + kind="CoreCheckDefinition", branch=branch_name, repository__ids=[str(self.id)] + ) + } + present_in_both, only_graph, only_local = compare_lists( - list1=list(checks_definition_in_graph.keys()), list2=list(local_check_definitions.keys()) + list1=list(check_definition_in_graph.keys()), list2=list(local_check_definitions.keys()) ) for check_name in only_local: @@ -1417,21 +1421,166 @@ async def import_python_check_definitions_from_module( for check_name in present_in_both: if not await self.compare_python_check_definition( check=local_check_definitions[check_name], - existing_check=checks_definition_in_graph[check_name], + existing_check=check_definition_in_graph[check_name], ): LOGGER.info( f"{self.name} | New version of CheckDefinition '{check_name}' found on branch {branch_name} ({commit[:8]}), updating" ) await self.update_python_check_definition( check=local_check_definitions[check_name], - existing_check=checks_definition_in_graph[check_name], + existing_check=check_definition_in_graph[check_name], ) for check_name in only_graph: LOGGER.info( f"{self.name} | CheckDefinition '{check_name}' not found locally in branch {branch_name}, deleting" ) - await checks_definition_in_graph[check_name].delete() + await check_definition_in_graph[check_name].delete() + + async def import_python_transforms( + self, branch_name: str, commit: str, config_file: InfrahubRepositoryConfig + ) -> None: + commit_wt = self.get_worktree(identifier=commit) + branch_wt = self.get_worktree(identifier=commit or branch_name) + + # 
Ensure the path for this repository is present in sys.path + if self.directory_root not in sys.path: + sys.path.append(self.directory_root) + + transforms = [] + for transform in config_file.python_transforms: + LOGGER.debug(self.name, import_type="python_transform", file=transform.file_path) + + file_info = extract_repo_file_information( + full_filename=os.path.join(branch_wt.directory, transform.file_path.as_posix()), + repo_directory=self.directory_root, + worktree_directory=commit_wt.directory, + ) + try: + module = importlib.import_module(file_info.module_name) + except ModuleNotFoundError as exc: + LOGGER.warning( + self.name, import_type="python_transform", file=transform.file_path.as_posix(), error=str(exc) + ) + continue + + transforms.extend( + await self.get_python_transforms( + branch_name=branch_name, + module=module, + file_path=file_info.relative_path_file, + transform=transform, + ) + ) + + local_transform_definitions = {transform.name: transform for transform in transforms} + transform_definition_in_graph = { + transform.name.value: transform + for transform in await self.client.filters( + kind="CoreTransformPython", branch=branch_name, repository__ids=[str(self.id)] + ) + } + + present_in_both, only_graph, only_local = compare_lists( + list1=list(transform_definition_in_graph.keys()), list2=list(local_transform_definitions.keys()) + ) + + for transform_name in only_local: + LOGGER.info( + f"{self.name} | New TransformPython '{transform_name}' found on branch {branch_name} ({commit[:8]}), creating" + ) + await self.create_python_transform( + branch_name=branch_name, transform=local_transform_definitions[transform_name] + ) + + for transform_name in present_in_both: + if not await self.compare_python_transform( + local_transform=local_transform_definitions[transform_name], + existing_transform=transform_definition_in_graph[transform_name], + ): + LOGGER.info( + f"{self.name} | New version of TransformPython '{transform_name}' found on branch {branch_name} ({commit[:8]}), updating" + ) + await self.update_python_transform( + local_transform=local_transform_definitions[transform_name], + existing_transform=transform_definition_in_graph[transform_name], + ) + + for transform_name in only_graph: + LOGGER.info( + f"{self.name} | TransformPython '{transform_name}' not found locally in branch {branch_name}, deleting" + ) + await transform_definition_in_graph[transform_name].delete() + + async def get_check_definition( + self, + branch_name: str, + module: types.ModuleType, + file_path: str, + check_definition: InfrahubCheckDefinitionConfig, + ) -> List[CheckDefinitionInformation]: + if check_definition.class_name not in dir(module): + return [] + + checks = [] + check_class = getattr(module, check_definition.class_name) + graphql_query = await self.client.get( + kind="CoreGraphQLQuery", branch=branch_name, id=str(check_class.query), populate_store=True + ) + try: + checks.append( + CheckDefinitionInformation( + name=check_definition.name, + repository=str(self.id), + class_name=check_definition.class_name, + check_class=check_class, + file_path=file_path, + query=str(graphql_query.id), + timeout=check_class.timeout, + rebase=check_class.rebase, + parameters=check_definition.parameters, + targets=check_definition.targets, + ) + ) + + except Exception as exc: # pylint: disable=broad-exception-caught + LOGGER.error( + f"{self.name} | An error occurred while processing the CheckDefinition {check_class.__name__} from {file_path} : {exc} " + ) + return checks
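With checks and transforms now declared in the repository config instead of discovered by scanning every Python file, the lookup in get_check_definition() above (and get_python_transforms() below) reduces to importing the declared module and fetching the configured class. A minimal sketch, with illustrative module and class names:

import importlib
from typing import Optional


def load_configured_class(module_name: str, class_name: str) -> Optional[type]:
    """Import the module declared in the repository config and fetch the configured class."""
    module = importlib.import_module(module_name)  # ModuleNotFoundError is logged and skipped above
    if class_name not in dir(module):
        return None  # mirrors get_check_definition()/get_python_transforms() returning []
    return getattr(module, class_name)


# e.g. load_configured_class("checks.device_check", "DeviceCheck")  # illustrative names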
+ + async def get_python_transforms( + self, branch_name: str, module: types.ModuleType, file_path: str, transform: InfrahubPythonTransformConfig + ) -> List[TransformPythonInformation]: + if transform.class_name not in dir(module): + return [] + + transforms = [] + transform_class = getattr(module, transform.class_name) + graphql_query = await self.client.get( + kind="CoreGraphQLQuery", branch=branch_name, id=str(transform_class.query), populate_store=True + ) + try: + transforms.append( + TransformPythonInformation( + name=transform.name, + repository=str(self.id), + class_name=transform.class_name, + transform_class=transform_class, + file_path=file_path, + query=str(graphql_query.id), + timeout=transform_class.timeout, + rebase=transform_class.rebase, + url=transform_class.url, + ) + ) + + except Exception as exc: # pylint: disable=broad-exception-caught + LOGGER.error( + f"{self.name} | An error occurred while processing the PythonTransform {transform.name} from {file_path} : {exc} " + ) + + return transforms async def create_python_check_definition(self, branch_name: str, check: CheckDefinitionInformation) -> InfrahubNode: data = { @@ -1445,6 +1594,9 @@ async def create_python_check_definition(self, branch_name: str, check: CheckDef "parameters": check.parameters, } + if check.targets: + data["targets"] = check.targets + schema = await self.client.schema.get(kind="CoreCheckDefinition", branch=branch_name) create_payload = self.client.schema.generate_payload_create( @@ -1498,70 +1650,6 @@ async def compare_python_check_definition( return False return True - async def import_python_transforms_from_module(self, branch_name: str, commit: str, module, file_path: str): - # TODO add function to validate if a check is valid - - if INFRAHUB_TRANSFORM_VARIABLE_TO_IMPORT not in dir(module): - return False - - transforms_in_graph = { - transform.name.value: transform - for transform in await self.client.filters( - kind="CoreTransformPython", branch=branch_name, repository__ids=[str(self.id)] - ) - } - - local_transforms = {} - - for transform_class in getattr(module, INFRAHUB_TRANSFORM_VARIABLE_TO_IMPORT): - transform = transform_class() - - # Query the GraphQL query and (eventually) replace the name with the ID - graphql_query = await self.client.get( - kind="CoreGraphQLQuery", branch=branch_name, id=str(transform.query), populate_store=True - ) - - item = TransformPythonInformation( - name=transform.name, - repository=str(self.id), - query=str(graphql_query.id), - file_path=file_path, - url=transform.url, - transform_class=transform, - class_name=transform_class.__name__, - rebase=transform.rebase, - timeout=transform.timeout, - ) - local_transforms[item.name] = item - - present_in_both, only_graph, only_local = compare_lists( - list1=list(transforms_in_graph.keys()), list2=list(local_transforms.keys()) - ) - - for transform_name in only_local: - LOGGER.info( - f"{self.name} | New Python Transform '{transform_name}' found on branch {branch_name} ({commit[:8]}), creating" - ) - await self.create_python_transform(branch_name=branch_name, transform=local_transforms[transform_name]) - - for transform_name in present_in_both: - if not await self.compare_python_transform( - existing_transform=transforms_in_graph[transform_name], local_transform=local_transforms[transform_name] - ): - LOGGER.info( - f"{self.name} | New version of the Python Transform '{transform_name}' found on branch {branch_name} ({commit[:8]}), updating" - ) - await self.update_python_transform( - 
existing_transform=transforms_in_graph[transform_name], - local_transform=local_transforms[transform_name], - ) - - for transform_name in only_graph: - LOGGER.info( - f"{self.name} | Python Transform '{transform_name}' not found locally in branch {branch_name} ({commit[:8]}), deleting" - ) - await transforms_in_graph[transform_name].delete() - async def create_python_transform(self, branch_name: str, transform: TransformPythonInformation) -> InfrahubNode: schema = await self.client.schema.get(kind="CoreTransformPython", branch=branch_name) data = { @@ -1586,7 +1674,7 @@ async def create_python_transform(self, branch_name: str, transform: TransformPy async def update_python_transform( self, existing_transform: InfrahubNode, local_transform: TransformPythonInformation - ) -> bool: + ) -> None: if existing_transform.query.id != local_transform.query: existing_transform.query = {"id": local_transform.query, "source": str(self.id), "is_protected": True} @@ -1618,68 +1706,9 @@ async def compare_python_transform( return False return True - async def import_all_yaml_files(self, branch_name: str, commit: str, exclude: Optional[List[str]] = None): - yaml_files = await self.find_files(extension=["yml", "yaml"], commit=commit) - - for yaml_file in yaml_files: - LOGGER.debug(f"{self.name} | Checking {yaml_file}") - - # ------------------------------------------------------ - # Import Yaml - # ------------------------------------------------------ - with open(yaml_file, "r", encoding="UTF-8") as file_data: - yaml_data = file_data.read() - - try: - data = yaml.safe_load(yaml_data) - except yaml.YAMLError as exc: - LOGGER.warning(f"{self.name} | Unable to load YAML file {yaml_file} : {exc}") - continue - - if not isinstance(data, dict): - LOGGER.debug(f"{self.name} | {yaml_file} : payload is not a dictionnary .. 
SKIPPING") - continue - - # ------------------------------------------------------ - # Search for Valid object types - # ------------------------------------------------------ - for key, data in data.items(): - if exclude and key in exclude: - continue - if not hasattr(self, f"import_objects_{key}"): - continue - - method = getattr(self, f"import_objects_{key}") - await method(branch_name=branch_name, commit=commit, data=data) - - async def import_all_python_files(self, branch_name: str, commit: str): - commit_wt = self.get_worktree(identifier=commit) - - python_files = await self.find_files(extension=["py"], commit=commit) - - # Ensure the path for this repository is present in sys.path - if self.directory_root not in sys.path: - sys.path.append(self.directory_root) - - for python_file in python_files: - LOGGER.debug(f"{self.name} | Checking {python_file}") - - file_info = extract_repo_file_information( - full_filename=python_file, repo_directory=self.directory_root, worktree_directory=commit_wt.directory - ) - - try: - module = importlib.import_module(file_info.module_name) - except ModuleNotFoundError: - LOGGER.warning(f"{self.name} | Unable to load python file {python_file}") - continue - - await self.import_python_check_definitions_from_module( - branch_name=branch_name, commit=commit, module=module, file_path=file_info.relative_path_file - ) - await self.import_python_transforms_from_module( - branch_name=branch_name, commit=commit, module=module, file_path=file_info.relative_path_file - ) + async def import_all_python_files(self, branch_name: str, commit: str, config_file: InfrahubRepositoryConfig): + await self.import_python_check_definitions(branch_name=branch_name, commit=commit, config_file=config_file) + await self.import_python_transforms(branch_name=branch_name, commit=commit, config_file=config_file) async def find_files( self, @@ -1753,7 +1782,13 @@ async def render_jinja2_template(self, commit: str, location: str, data: dict): raise TransformError(repository_name=self.name, commit=commit, location=location, message=str(exc)) from exc async def execute_python_check( - self, branch_name: str, commit: str, location: str, class_name: str, client: InfrahubClient + self, + branch_name: str, + commit: str, + location: str, + class_name: str, + client: InfrahubClient, + params: Optional[Dict] = None, ) -> InfrahubCheck: """Execute A Python Check stored in the repository.""" @@ -1774,9 +1809,11 @@ async def execute_python_check( module = importlib.import_module(file_info.module_name) - check_class = getattr(module, class_name) + check_class: InfrahubCheck = getattr(module, class_name) - check = await check_class.init(root_directory=commit_worktree.directory, branch=branch_name, client=client) + check = await check_class.init( + root_directory=commit_worktree.directory, branch=branch_name, client=client, params=params + ) await check.run() return check diff --git a/backend/infrahub/graphql/__init__.py b/backend/infrahub/graphql/__init__.py index d8fda35f75..7af0d31397 100644 --- a/backend/infrahub/graphql/__init__.py +++ b/backend/infrahub/graphql/__init__.py @@ -54,7 +54,8 @@ class Mutation(InfrahubBaseMutation, MutationMixin): async def get_gql_subscription( - db: InfrahubDatabase, branch: Union[Branch, str] = None # pylint: disable=unused-argument + db: InfrahubDatabase, # pylint: disable=unused-argument + branch: Union[Branch, str] = None, # pylint: disable=unused-argument ) -> type[InfrahubBaseSubscription]: class Subscription(InfrahubBaseSubscription): pass diff --git 
a/backend/infrahub/graphql/analyzer.py b/backend/infrahub/graphql/analyzer.py index b55feaa7bd..93f99418d5 100644 --- a/backend/infrahub/graphql/analyzer.py +++ b/backend/infrahub/graphql/analyzer.py @@ -2,8 +2,10 @@ from graphql import ( DocumentNode, + FieldNode, GraphQLError, GraphQLSchema, + OperationDefinitionNode, OperationType, parse, validate, @@ -27,13 +29,18 @@ class GraphQLQueryVariable(BaseModel): default_value: Optional[Any] = None +class GraphQLOperation(BaseModel): + name: Optional[str] + operation_type: OperationType + + class GraphQLQueryAnalyzer: def __init__(self, query: str, schema: Optional[GraphQLSchema] = None, branch: Optional[Branch] = None): self.query: str = query self.schema: Optional[GraphQLSchema] = schema self.branch: Optional[Branch] = branch self.document: DocumentNode = parse(self.query) - self._fields: Dict = None + self._fields: Optional[Dict] = None @property def is_valid(self) -> Tuple[bool, Optional[List[GraphQLError]]]: @@ -48,14 +55,30 @@ def nbr_queries(self) -> int: return len(self.document.definitions) @property - def operations(self) -> Set[OperationType]: - return {definition.operation for definition in self.document.definitions} + def operations(self) -> List[GraphQLOperation]: + operations = [] + for definition in self.document.definitions: + if not isinstance(definition, OperationDefinitionNode): + continue + operation_type = definition.operation + for field_node in definition.selection_set.selections: + if not isinstance(field_node, FieldNode): + continue + operations.append(GraphQLOperation(operation_type=operation_type, name=field_node.name.value)) + return operations + + @property + def contains_mutation(self) -> bool: + return any(op.operation_type == OperationType.MUTATION for op in self.operations) @property def variables(self) -> List[GraphQLQueryVariable]: response = [] for definition in self.document.definitions: - for variable in definition.variable_definitions: + variable_definitions = getattr(definition, "variable_definitions", None) + if not variable_definitions: + continue + for variable in variable_definitions: data = {"name": variable.variable.name.value} non_null = False if variable.type.kind == "non_null_type": @@ -100,15 +123,16 @@ async def get_models_in_use(self) -> Set[str]: graphql_types = set() models = set() - if not self.schema and not self.branch: - raise ValueError("Schema and Branch msut be provided to extract the models in use.") + if not (self.schema and self.branch): + raise ValueError("Schema and Branch must be provided to extract the models in use.") for definition in self.document.definitions: fields = await extract_fields(definition.selection_set) - if definition.operation == OperationType.QUERY: + operation = getattr(definition, "operation", None) + if operation == OperationType.QUERY: schema = self.schema.query_type - elif definition.operation == OperationType.MUTATION: + elif operation == OperationType.MUTATION: schema = self.schema.mutation_type else: # Subscription not supported right now diff --git a/backend/infrahub/storage/__init__.py b/backend/infrahub/graphql/api/__init__.py similarity index 100% rename from backend/infrahub/storage/__init__.py rename to backend/infrahub/graphql/api/__init__.py diff --git a/backend/infrahub/graphql/api/dependencies.py b/backend/infrahub/graphql/api/dependencies.py new file mode 100644 index 0000000000..a7d88b4c3f --- /dev/null +++ b/backend/infrahub/graphql/api/dependencies.py @@ -0,0 +1,32 @@ +from typing import Any + +import infrahub.config as config + +from 
..app import InfrahubGraphQLApp +from ..auth.query_permission_checker.anonymous_checker import AnonymousGraphQLPermissionChecker +from ..auth.query_permission_checker.checker import GraphQLQueryPermissionChecker +from ..auth.query_permission_checker.default_checker import DefaultGraphQLPermissionChecker +from ..auth.query_permission_checker.read_only_checker import ReadOnlyGraphQLPermissionChecker +from ..auth.query_permission_checker.read_write_checker import ReadWriteGraphQLPermissionChecker +from .graphiql import make_graphiql_handler + + +def get_anonymous_access_setting() -> bool: + return config.SETTINGS.main.allow_anonymous_access + + +def build_graphql_query_permission_checker() -> GraphQLQueryPermissionChecker: + return GraphQLQueryPermissionChecker( + [ + ReadWriteGraphQLPermissionChecker(), + ReadOnlyGraphQLPermissionChecker(), + AnonymousGraphQLPermissionChecker(get_anonymous_access_setting), + DefaultGraphQLPermissionChecker(), + ] + ) + + +def build_graphql_app(**kwargs: Any) -> InfrahubGraphQLApp: + if "on_get" not in kwargs: + kwargs["on_get"] = make_graphiql_handler() + return InfrahubGraphQLApp(build_graphql_query_permission_checker(), **kwargs) diff --git a/backend/infrahub/graphql/api/endpoints.py b/backend/infrahub/graphql/api/endpoints.py new file mode 100644 index 0000000000..08a5048a10 --- /dev/null +++ b/backend/infrahub/graphql/api/endpoints.py @@ -0,0 +1,12 @@ +from fastapi import APIRouter + +from .dependencies import build_graphql_app + +router = APIRouter(redirect_slashes=False) + + +graphql_app = build_graphql_app() +router.add_route(path="/graphql", endpoint=graphql_app, methods=["GET", "POST", "OPTIONS"]) +router.add_route(path="/graphql/{branch_name:path}", endpoint=graphql_app, methods=["GET", "POST", "OPTIONS"]) +# router.add_websocket_route(path="/", route=graphql_app) +# router.add_websocket_route(path="/{branch_name:str}", route=graphql_app) diff --git a/backend/infrahub/graphql/api/graphiql.py b/backend/infrahub/graphql/api/graphiql.py new file mode 100644 index 0000000000..52789b3df0 --- /dev/null +++ b/backend/infrahub/graphql/api/graphiql.py @@ -0,0 +1,104 @@ +from typing import Callable + +from starlette.requests import Request +from starlette.responses import HTMLResponse, Response + + +def make_graphiql_handler() -> Callable[[Request], Response]: + def handler(_: Request) -> Response: + return HTMLResponse(_GRAPHIQL_HTML) + + return handler + + +_GRAPHIQL_HTML = """ + + + + + GraphiQL + + + + + + + + + + + + + + +
+ <div id="graphiql">Loading...</div>
+ + +""".strip() # noqa: B950 diff --git a/backend/infrahub/graphql/app.py b/backend/infrahub/graphql/app.py index c3de230a17..48bf6d554b 100644 --- a/backend/infrahub/graphql/app.py +++ b/backend/infrahub/graphql/app.py @@ -41,12 +41,12 @@ from starlette.background import BackgroundTasks from starlette.datastructures import UploadFile from starlette.requests import HTTPConnection, Request -from starlette.responses import HTMLResponse, JSONResponse, Response +from starlette.responses import JSONResponse, Response from starlette.websockets import WebSocket, WebSocketDisconnect, WebSocketState from infrahub.api.dependencies import api_key_scheme, cookie_auth_scheme, jwt_scheme from infrahub.auth import AccountSession, authentication_token -from infrahub.exceptions import AuthorizationError, PermissionDeniedError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer # pylint: disable=no-name-in-module,unused-argument,ungrouped-imports,raise-missing-from @@ -78,6 +78,8 @@ from infrahub.core.branch import Branch from infrahub.database import InfrahubDatabase + from .auth.query_permission_checker.checker import GraphQLQueryPermissionChecker + GQL_CONNECTION_ACK = "connection_ack" GQL_CONNECTION_ERROR = "connection_error" @@ -93,16 +95,10 @@ RootValue = Any -def make_graphiql_handler() -> Callable[[Request], Response]: - def handler(request: Request) -> Response: - return HTMLResponse(_GRAPHIQL_HTML) - - return handler - - class InfrahubGraphQLApp: def __init__( self, + permission_checker: GraphQLQueryPermissionChecker, schema: graphene.Schema = None, *, on_get: Optional[Callable[[Request], Union[Response, Awaitable[Response]]]] = None, @@ -111,7 +107,6 @@ def __init__( error_formatter: Callable[[GraphQLError], GraphQLFormattedError] = format_error, logger_name: Optional[str] = None, execution_context_class: Optional[Type[ExecutionContext]] = None, - playground: bool = False, # Deprecating. Use on_get instead. 
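# --- Illustrative sketch (editor's addition, not part of the changeset) -----
# InfrahubGraphQLApp no longer builds its own GraphiQL handler or authorization
# logic; both are injected by build_graphql_app(), and the API router in
# graphql/api/endpoints.py mounts the resulting ASGI app.
from fastapi import FastAPI

from infrahub.graphql.api.endpoints import router

app = FastAPI()
app.include_router(router)  # serves /graphql and /graphql/{branch_name}
# -----------------------------------------------------------------------------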
): self._schema = schema self.on_get = on_get @@ -120,9 +115,7 @@ def __init__( self.middleware = middleware self.execution_context_class = execution_context_class self.logger = logging.getLogger(logger_name or __name__) - - if playground and self.on_get is None: - self.on_get = make_graphiql_handler() + self.permission_checker = permission_checker async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] == "http": @@ -211,17 +204,17 @@ async def _handle_http_request( operation = operations query = operation["query"] + schema_branch = registry.schema.get_schema_branch(name=branch.name) + graphql_schema = await schema_branch.get_graphql_schema(db=db) + analyzed_query = GraphQLQueryAnalyzer(query=query, schema=graphql_schema, branch=branch) + await self.permission_checker.check(account_session=account_session, analyzed_query=analyzed_query) + variable_values = operation.get("variables") operation_name = operation.get("operationName") - self._validate_authentication(account_session=account_session, query=query) - context_value = await self._get_context_value( request=request, db=db, branch=branch, account_session=account_session ) - schema_branch = registry.schema.get_schema_branch(name=branch.name) - graphql_schema = await schema_branch.get_graphql_schema(db=db) - result = await graphql( schema=graphql_schema, source=query, @@ -430,24 +423,6 @@ async def _observe_subscription( if WebSocketState.DISCONNECTED not in (websocket.client_state, websocket.application_state): await websocket.send_json({"type": GQL_COMPLETE, "id": operation_id}) - @staticmethod - def _validate_authentication(account_session: AccountSession, query: str) -> None: - document = parse(query) - query_type = document.definitions[0].operation.value - - if account_session.authenticated: - if not account_session.read_only: - return - if account_session.read_only and query_type != "mutation": - return - - raise PermissionDeniedError("The current account is not authorized to perform this operation") - - if config.SETTINGS.main.allow_anonymous_access and query_type != "mutation": - return - - raise AuthorizationError("Authentication is required to perform this operation") - async def _get_operation_from_request( request: Request, @@ -511,96 +486,3 @@ def _inject_file_to_operations(ops_tree: Any, _file: UploadFile, path: Sequence[ ops_tree[key] = _file else: _inject_file_to_operations(ops_tree[key], _file, path[1:]) - - -_GRAPHIQL_HTML = """ - - - - - GraphiQL - - - - - - - - - - - - - - -
- <div id="graphiql">Loading...</div>
- - -""".strip() # noqa: B950 diff --git a/backend/tests/unit/checks/__init__.py b/backend/infrahub/graphql/auth/__init__.py similarity index 100% rename from backend/tests/unit/checks/__init__.py rename to backend/infrahub/graphql/auth/__init__.py diff --git a/docs/release-nodes/readme.md b/backend/infrahub/graphql/auth/query_permission_checker/__init__.py similarity index 100% rename from docs/release-nodes/readme.md rename to backend/infrahub/graphql/auth/query_permission_checker/__init__.py diff --git a/backend/infrahub/graphql/auth/query_permission_checker/anonymous_checker.py b/backend/infrahub/graphql/auth/query_permission_checker/anonymous_checker.py new file mode 100644 index 0000000000..1f252c7ffd --- /dev/null +++ b/backend/infrahub/graphql/auth/query_permission_checker/anonymous_checker.py @@ -0,0 +1,20 @@ +from typing import Callable + +from infrahub.auth import AccountSession +from infrahub.exceptions import AuthorizationError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer + +from .interface import GraphQLQueryPermissionCheckerInterface + + +class AnonymousGraphQLPermissionChecker(GraphQLQueryPermissionCheckerInterface): + def __init__(self, anonymous_access_allowed_func: Callable[[], bool]): + self.anonymous_access_allowed_func = anonymous_access_allowed_func + + async def supports(self, account_session: AccountSession) -> bool: + return not account_session.authenticated + + async def check(self, analyzed_query: GraphQLQueryAnalyzer): + if self.anonymous_access_allowed_func() and not analyzed_query.contains_mutation: + return + raise AuthorizationError("Authentication is required to perform this operation") diff --git a/backend/infrahub/graphql/auth/query_permission_checker/checker.py b/backend/infrahub/graphql/auth/query_permission_checker/checker.py new file mode 100644 index 0000000000..2ed9804230 --- /dev/null +++ b/backend/infrahub/graphql/auth/query_permission_checker/checker.py @@ -0,0 +1,19 @@ +from typing import List + +from infrahub.auth import AccountSession +from infrahub.exceptions import PermissionDeniedError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer + +from .interface import GraphQLQueryPermissionCheckerInterface + + +class GraphQLQueryPermissionChecker: + def __init__(self, sub_checkers: List[GraphQLQueryPermissionCheckerInterface]): + self.sub_checkers = sub_checkers + + async def check(self, account_session: AccountSession, analyzed_query: GraphQLQueryAnalyzer): + for sub_checker in self.sub_checkers: + if await sub_checker.supports(account_session): + await sub_checker.check(analyzed_query) + return + raise PermissionDeniedError("The current account is not authorized to perform this operation") diff --git a/backend/infrahub/graphql/auth/query_permission_checker/default_checker.py b/backend/infrahub/graphql/auth/query_permission_checker/default_checker.py new file mode 100644 index 0000000000..78cc43a57c --- /dev/null +++ b/backend/infrahub/graphql/auth/query_permission_checker/default_checker.py @@ -0,0 +1,13 @@ +from infrahub.auth import AccountSession +from infrahub.exceptions import AuthorizationError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer + +from .interface import GraphQLQueryPermissionCheckerInterface + + +class DefaultGraphQLPermissionChecker(GraphQLQueryPermissionCheckerInterface): + async def supports(self, account_session: AccountSession) -> bool: + return True + + async def check(self, analyzed_query: GraphQLQueryAnalyzer): + raise AuthorizationError("Authentication is required to perform this 
operation") diff --git a/backend/infrahub/graphql/auth/query_permission_checker/interface.py b/backend/infrahub/graphql/auth/query_permission_checker/interface.py new file mode 100644 index 0000000000..44b4c6e56a --- /dev/null +++ b/backend/infrahub/graphql/auth/query_permission_checker/interface.py @@ -0,0 +1,14 @@ +from abc import ABC, abstractmethod + +from infrahub.auth import AccountSession +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer + + +class GraphQLQueryPermissionCheckerInterface(ABC): + @abstractmethod + async def supports(self, account_session: AccountSession) -> bool: + ... + + @abstractmethod + async def check(self, analyzed_query: GraphQLQueryAnalyzer): + ... diff --git a/backend/infrahub/graphql/auth/query_permission_checker/read_only_checker.py b/backend/infrahub/graphql/auth/query_permission_checker/read_only_checker.py new file mode 100644 index 0000000000..8ea89de069 --- /dev/null +++ b/backend/infrahub/graphql/auth/query_permission_checker/read_only_checker.py @@ -0,0 +1,22 @@ +from graphql import OperationType + +from infrahub.auth import AccountSession +from infrahub.exceptions import PermissionDeniedError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer + +from .interface import GraphQLQueryPermissionCheckerInterface + + +class ReadOnlyGraphQLPermissionChecker(GraphQLQueryPermissionCheckerInterface): + allowed_readonly_mutations = ["CoreAccountSelfUpdate"] + + async def supports(self, account_session: AccountSession) -> bool: + return account_session.authenticated and account_session.read_only + + async def check(self, analyzed_query: GraphQLQueryAnalyzer): + for operation in analyzed_query.operations: + if ( + operation.operation_type == OperationType.MUTATION + and operation.name not in self.allowed_readonly_mutations + ): + raise PermissionDeniedError("The current account is not authorized to perform this operation") diff --git a/backend/infrahub/graphql/auth/query_permission_checker/read_write_checker.py b/backend/infrahub/graphql/auth/query_permission_checker/read_write_checker.py new file mode 100644 index 0000000000..0200ddb87d --- /dev/null +++ b/backend/infrahub/graphql/auth/query_permission_checker/read_write_checker.py @@ -0,0 +1,12 @@ +from infrahub.auth import AccountSession +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer + +from .interface import GraphQLQueryPermissionCheckerInterface + + +class ReadWriteGraphQLPermissionChecker(GraphQLQueryPermissionCheckerInterface): + async def supports(self, account_session: AccountSession) -> bool: + return account_session.authenticated and not account_session.read_only + + async def check(self, analyzed_query: GraphQLQueryAnalyzer): + return diff --git a/backend/infrahub/graphql/generator.py b/backend/infrahub/graphql/generator.py index ce83e4468a..1da23e8b8f 100644 --- a/backend/infrahub/graphql/generator.py +++ b/backend/infrahub/graphql/generator.py @@ -1,7 +1,8 @@ from __future__ import annotations from collections import defaultdict -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union import graphene @@ -56,9 +57,7 @@ def load_node_interface(branch: Branch): registry.set_graphql_type(name=paginated_interface._meta.name, graphql_type=paginated_interface, branch=branch.name) -async def generate_object_types( - db: InfrahubDatabase, branch: Union[Branch, str] -): # pylint: disable=too-many-branches,too-many-statements +async def 
generate_object_types(db: InfrahubDatabase, branch: Union[Branch, str]): # pylint: disable=too-many-branches,too-many-statements """Generate all GraphQL objects for the schema and store them in the internal registry.""" branch = await get_branch(db=db, branch=branch) @@ -239,11 +238,12 @@ async def generate_mutation_mixin(db: InfrahubDatabase, branch: Union[Branch, st } base_class = mutation_map.get(node_schema.kind, InfrahubMutation) - create, update, delete = generate_graphql_mutations(branch=branch, schema=node_schema, base_class=base_class) + mutations = generate_graphql_mutations(branch=branch, schema=node_schema, base_class=base_class) - class_attrs[f"{node_schema.kind}Create"] = create.Field() - class_attrs[f"{node_schema.kind}Update"] = update.Field() - class_attrs[f"{node_schema.kind}Delete"] = delete.Field() + class_attrs[f"{node_schema.kind}Create"] = mutations.create.Field() + class_attrs[f"{node_schema.kind}Update"] = mutations.update.Field() + class_attrs[f"{node_schema.kind}Upsert"] = mutations.upsert.Field() + class_attrs[f"{node_schema.kind}Delete"] = mutations.delete.Field() return type("MutationMixin", (object,), class_attrs) @@ -449,18 +449,41 @@ def generate_paginated_interface_object( return type(f"NestedPaginated{schema.kind}", (InfrahubObject,), main_attrs) +@dataclass +class GraphqlMutations: + create: Type[InfrahubMutation] + update: Type[InfrahubMutation] + upsert: Type[InfrahubMutation] + delete: Type[InfrahubMutation] + + def generate_graphql_mutations( schema: NodeSchema, base_class: type[InfrahubMutation], branch: Branch -) -> Tuple[Type[InfrahubMutation], Type[InfrahubMutation], Type[InfrahubMutation]]: - create = generate_graphql_mutation_create(branch=branch, schema=schema, base_class=base_class) - update = generate_graphql_mutation_update(branch=branch, schema=schema, base_class=base_class) +) -> GraphqlMutations: + graphql_mutation_create_input = generate_graphql_mutation_create_input(schema) + graphql_mutation_update_input = generate_graphql_mutation_update_input(schema) + + create = generate_graphql_mutation_create( + branch=branch, schema=schema, base_class=base_class, input_type=graphql_mutation_create_input + ) + upsert = generate_graphql_mutation_create( + branch=branch, + schema=schema, + base_class=base_class, + input_type=graphql_mutation_create_input, + mutation_type="Upsert", + ) + update = generate_graphql_mutation_update( + branch=branch, schema=schema, base_class=base_class, input_type=graphql_mutation_update_input + ) delete = generate_graphql_mutation_delete(branch=branch, schema=schema, base_class=base_class) registry.set_graphql_type(name=create._meta.name, graphql_type=create, branch=branch.name) registry.set_graphql_type(name=update._meta.name, graphql_type=update, branch=branch.name) + registry.set_graphql_type(name=upsert._meta.name, graphql_type=upsert, branch=branch.name) registry.set_graphql_type(name=delete._meta.name, graphql_type=delete, branch=branch.name) - return create, update, delete + return GraphqlMutations(create=create, update=update, upsert=upsert, delete=delete) def generate_graphql_mutation_create_input(schema: NodeSchema) -> Type[graphene.InputObjectType]: @@ -476,6 +499,9 @@ class StatusCreateInput(InputObjectType): attrs: Dict[str, Union[graphene.String, graphene.InputField]] = {"id": graphene.String(required=False)} for attr in schema.attributes: + if attr.read_only: + continue + attr_type = get_attribute_type(kind=attr.kind).get_graphql_input() # A Field is not required if explicitely indicated or if a default 
value has been provided @@ -509,6 +535,8 @@ class StatusUpdateInput(InputObjectType): attrs: Dict[str, Union[graphene.String, graphene.InputField]] = {"id": graphene.String(required=True)} for attr in schema.attributes: + if attr.read_only: + continue attr_type = get_attribute_type(kind=attr.kind).get_graphql_input() attrs[attr.name] = graphene.InputField(attr_type, required=False, description=attr.description) @@ -527,13 +555,14 @@ class StatusUpdateInput(InputObjectType): def generate_graphql_mutation_create( schema: NodeSchema, branch: Branch, + input_type: Type[graphene.InputObjectType], base_class: type[InfrahubMutation] = InfrahubMutation, + mutation_type: str = "Create", ) -> Type[InfrahubMutation]: """Generate a GraphQL Mutation to CREATE an object based on the specified NodeSchema.""" - name = f"{schema.kind}Create" + name = f"{schema.kind}{mutation_type}" object_type = generate_graphql_object(schema=schema, branch=branch) - input_type = generate_graphql_mutation_create_input(schema=schema) main_attrs = {"ok": graphene.Boolean(), "object": graphene.Field(object_type)} @@ -551,13 +580,13 @@ def generate_graphql_mutation_create( def generate_graphql_mutation_update( schema: NodeSchema, branch: Branch, + input_type: Type[graphene.InputObjectType], base_class: type[InfrahubMutation] = InfrahubMutation, ) -> Type[InfrahubMutation]: """Generate a GraphQL Mutation to UPDATE an object based on the specified NodeSchema.""" name = f"{schema.kind}Update" object_type = generate_graphql_object(schema=schema, branch=branch) - input_type = generate_graphql_mutation_update_input(schema=schema) main_attrs = {"ok": graphene.Boolean(), "object": graphene.Field(object_type)} diff --git a/backend/infrahub/graphql/mutations/__init__.py b/backend/infrahub/graphql/mutations/__init__.py index 6fa32f68a7..ad0e871886 100644 --- a/backend/infrahub/graphql/mutations/__init__.py +++ b/backend/infrahub/graphql/mutations/__init__.py @@ -1,4 +1,4 @@ -from .account import CoreAccountTokenCreate +from .account import CoreAccountSelfUpdate, CoreAccountTokenCreate from .artifact_definition import InfrahubArtifactDefinitionMutation from .attribute import ( AnyAttributeInput, @@ -28,6 +28,7 @@ ) from .relationship import RelationshipAdd, RelationshipRemove from .repository import InfrahubRepositoryMutation +from .schema import SchemaDropdownAdd, SchemaDropdownRemove, SchemaEnumAdd, SchemaEnumRemove __all__ = [ "AnyAttributeInput", @@ -41,6 +42,7 @@ "BranchNameInput", "BranchUpdate", "CheckboxAttributeInput", + "CoreAccountSelfUpdate", "CoreAccountTokenCreate", "InfrahubArtifactDefinitionMutation", "InfrahubRepositoryMutation", @@ -57,4 +59,8 @@ "RelationshipRemove", "StringAttributeInput", "TextAttributeInput", + "SchemaDropdownAdd", + "SchemaDropdownRemove", + "SchemaEnumAdd", + "SchemaEnumRemove", ] diff --git a/backend/infrahub/graphql/mutations/account.py b/backend/infrahub/graphql/mutations/account.py index ca7903a518..27a74dd345 100644 --- a/backend/infrahub/graphql/mutations/account.py +++ b/backend/infrahub/graphql/mutations/account.py @@ -25,6 +25,11 @@ class CoreAccountTokenCreateInput(InputObjectType): expiration = InputField(String(required=False), description="Timestamp when the token expires") +class CoreAccountUpdateSelfInput(InputObjectType): + password = InputField(String(required=False), description="The new password") + description = InputField(String(required=False), description="The new description") + + class ValueType(InfrahubObjectType): value = String(required=True) @@ -53,7 +58,7 @@ async def 
mutate( account = results[0] - mutation_map = {"CoreAccountTokenCreate": cls.create_token} + mutation_map = {"CoreAccountTokenCreate": cls.create_token, "CoreAccountSelfUpdate": cls.update_self} return await mutation_map[cls.__name__](db=db, account=account, data=data, info=info) @classmethod @@ -74,6 +79,17 @@ async def create_token(cls, db: InfrahubDatabase, account: Node, data: Dict, inf fields = await extract_fields(info.field_nodes[0].selection_set) return cls(object=await obj.to_graphql(db=db, fields=fields.get("object", {})), ok=True) + @classmethod + async def update_self(cls, db: InfrahubDatabase, account: Node, data: Dict, info: GraphQLResolveInfo): + for field in ("password", "description"): + if value := data.get(field): + getattr(account, field).value = value + + async with db.start_transaction() as db: + await account.save(db=db) + + return cls(ok=True) + class CoreAccountTokenCreate(AccountMixin, Mutation): class Arguments: @@ -81,3 +97,10 @@ class Arguments: ok = Boolean() object = Field(CoreAccountTokenType) + + +class CoreAccountSelfUpdate(AccountMixin, Mutation): + class Arguments: + data = CoreAccountUpdateSelfInput(required=True) + + ok = Boolean() diff --git a/backend/infrahub/graphql/mutations/artifact_definition.py b/backend/infrahub/graphql/mutations/artifact_definition.py index c35cb37ff5..a22cdb5de7 100644 --- a/backend/infrahub/graphql/mutations/artifact_definition.py +++ b/backend/infrahub/graphql/mutations/artifact_definition.py @@ -23,9 +23,7 @@ class InfrahubArtifactDefinitionMutation(InfrahubMutationMixin, Mutation): @classmethod - def __init_subclass_with_meta__( - cls, schema: NodeSchema = None, _meta=None, **options - ): # pylint: disable=arguments-differ + def __init_subclass_with_meta__(cls, schema: NodeSchema = None, _meta=None, **options): # pylint: disable=arguments-differ # Make sure schema is a valid NodeSchema Node Class if not isinstance(schema, NodeSchema): raise ValueError(f"You need to pass a valid NodeSchema in '{cls.__name__}.Meta', received '{schema}'") diff --git a/backend/infrahub/graphql/mutations/graphql_query.py b/backend/infrahub/graphql/mutations/graphql_query.py index 0de387b285..1aad85c318 100644 --- a/backend/infrahub/graphql/mutations/graphql_query.py +++ b/backend/infrahub/graphql/mutations/graphql_query.py @@ -15,9 +15,7 @@ class InfrahubGraphQLQueryMutation(InfrahubMutationMixin, Mutation): @classmethod - def __init_subclass_with_meta__( - cls, schema: NodeSchema = None, _meta=None, **options - ): # pylint: disable=arguments-differ + def __init_subclass_with_meta__(cls, schema: NodeSchema = None, _meta=None, **options): # pylint: disable=arguments-differ # Make sure schema is a valid NodeSchema Node Class if not isinstance(schema, NodeSchema): raise ValueError(f"You need to pass a valid NodeSchema in '{cls.__name__}.Meta', received '{schema}'") @@ -49,7 +47,9 @@ async def extract_query_info( query_info["models"] = {"value": sorted(list(await analyzer.get_models_in_use()))} query_info["depth"] = {"value": await analyzer.calculate_depth()} query_info["height"] = {"value": await analyzer.calculate_height()} - query_info["operations"] = {"value": sorted([operation.value for operation in analyzer.operations])} + query_info["operations"] = { + "value": sorted([operation.operation_type.value for operation in analyzer.operations]) + } query_info["variables"] = {"value": [variable.dict() for variable in analyzer.variables]} return query_info diff --git a/backend/infrahub/graphql/mutations/main.py 
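# --- Illustrative sketch (editor's addition, not part of the changeset) -----
# CoreAccountSelfUpdate takes plain strings rather than attribute objects, so
# an account can rotate its own password or description; it is also the one
# mutation that ReadOnlyGraphQLPermissionChecker.allowed_readonly_mutations
# permits for read-only tokens.
SELF_UPDATE_MUTATION = """
mutation {
    CoreAccountSelfUpdate(data: {password: "new-password", description: "rotated"}) {
        ok
    }
}
"""
# -----------------------------------------------------------------------------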
b/backend/infrahub/graphql/mutations/main.py index 7795f1ade1..a644067ca6 100644 --- a/backend/infrahub/graphql/mutations/main.py +++ b/backend/infrahub/graphql/mutations/main.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, List, Optional from graphene import InputObjectType, Mutation from graphene.types.mutation import MutationOptions @@ -22,6 +22,8 @@ from infrahub.worker import WORKER_IDENTITY from ..utils import extract_fields +from .node_getter.by_default_filter import MutationNodeGetterByDefaultFilter +from .node_getter.by_id import MutationNodeGetterById if TYPE_CHECKING: from graphql import GraphQLResolveInfo @@ -30,6 +32,8 @@ from infrahub.core.branch import Branch from infrahub.database import InfrahubDatabase + from .node_getter.interface import MutationNodeGetterInterface + # pylint: disable=unused-argument log = get_logger() @@ -61,12 +65,25 @@ async def mutate(cls, root: dict, info: GraphQLResolveInfo, *args, **kwargs): elif "Update" in cls.__name__: obj, mutation = await cls.mutate_update(root=root, info=info, branch=branch, at=at, *args, **kwargs) action = MutationAction.UPDATED + elif "Upsert" in cls.__name__: + node_manager = NodeManager() + node_getters = [ + MutationNodeGetterById(db, node_manager), + MutationNodeGetterByDefaultFilter(db, node_manager), + ] + obj, mutation, created = await cls.mutate_upsert( + root=root, info=info, branch=branch, at=at, node_getters=node_getters, *args, **kwargs + ) + if created: + action = MutationAction.ADDED + else: + action = MutationAction.UPDATED elif "Delete" in cls.__name__: obj, mutation = await cls.mutate_delete(root=root, info=info, branch=branch, at=at, *args, **kwargs) action = MutationAction.REMOVED else: raise ValueError( - f"Unexpected class Name: {cls.__name__}, should start with either Create, Update or Delete" + f"Unexpected class Name: {cls.__name__}, should end with Create, Update, Upsert, or Delete" ) if config.SETTINGS.broker.enable and info.context.get("background"): @@ -159,7 +176,7 @@ async def mutate_update( raise ValidationError( {unique_attr.name: f"An object already exist with this value: {unique_attr.name}: {attr.value}"} ) - node_id = data.pop("id") + node_id = data.pop("id", obj.id) fields = list(data.keys()) validate_mutation_permissions_update_node( operation=cls.__name__, node_id=node_id, account_session=account_session, fields=fields @@ -180,6 +197,32 @@ async def mutate_update( return obj, cls(object=await obj.to_graphql(db=db, fields=fields.get("object", {})), ok=ok) + @classmethod + async def mutate_upsert( + cls, + root: dict, + info: GraphQLResolveInfo, + data: InputObjectType, + branch: Branch, + at: str, + node_getters: List[MutationNodeGetterInterface], + database: Optional[InfrahubDatabase] = None, + ): + schema_name = cls._meta.schema.kind + node_schema = registry.get_node_schema(name=schema_name, branch=branch) + + node = None + for getter in node_getters: + node = await getter.get_node(node_schema=node_schema, data=data, branch=branch, at=at) + if node: + break + + if node: + updated_obj, mutation = await cls.mutate_update(root, info, data, branch, at, database, node) + return updated_obj, mutation, False + created_obj, mutation = await cls.mutate_create(root, info, data, branch, at) + return created_obj, mutation, True + @classmethod async def mutate_delete( cls, @@ -221,9 +264,7 @@ async def validate_constraints(cls, db: InfrahubDatabase, node: Node, branch: Op class 
InfrahubMutation(InfrahubMutationMixin, Mutation): @classmethod - def __init_subclass_with_meta__( - cls, schema: NodeSchema = None, _meta=None, **options - ): # pylint: disable=arguments-differ + def __init_subclass_with_meta__(cls, schema: NodeSchema = None, _meta=None, **options): # pylint: disable=arguments-differ # Make sure schema is a valid NodeSchema Node Class if not isinstance(schema, NodeSchema): raise ValueError(f"You need to pass a valid NodeSchema in '{cls.__name__}.Meta', received '{schema}'") diff --git a/sync/diffsync/diffsync/py.typed b/backend/infrahub/graphql/mutations/node_getter/__init__.py similarity index 100% rename from sync/diffsync/diffsync/py.typed rename to backend/infrahub/graphql/mutations/node_getter/__init__.py diff --git a/backend/infrahub/graphql/mutations/node_getter/by_default_filter.py b/backend/infrahub/graphql/mutations/node_getter/by_default_filter.py new file mode 100644 index 0000000000..11c6d990e1 --- /dev/null +++ b/backend/infrahub/graphql/mutations/node_getter/by_default_filter.py @@ -0,0 +1,47 @@ +from typing import Optional + +from graphene import InputObjectType + +from infrahub.core.branch import Branch +from infrahub.core.manager import NodeManager +from infrahub.core.node import Node +from infrahub.core.schema import NodeSchema +from infrahub.database import InfrahubDatabase + +from .interface import MutationNodeGetterInterface + + +class MutationNodeGetterByDefaultFilter(MutationNodeGetterInterface): + def __init__(self, db: InfrahubDatabase, node_manager: NodeManager): + self.db = db + self.node_manager = node_manager + + async def get_node( + self, + node_schema: NodeSchema, + data: InputObjectType, + branch: Branch, + at: str, + ) -> Optional[Node]: + node = None + default_filter_value = None + if not node_schema.default_filter: + return node + this_datum = data + + for filter_key in node_schema.default_filter.split("__"): + if filter_key not in this_datum: + break + this_datum = this_datum[filter_key] + default_filter_value = this_datum + + if not default_filter_value: + return node + + return await self.node_manager.get_one_by_default_filter( + db=self.db, + id=default_filter_value, + schema_name=node_schema.kind, + branch=branch, + at=at, + ) diff --git a/backend/infrahub/graphql/mutations/node_getter/by_id.py b/backend/infrahub/graphql/mutations/node_getter/by_id.py new file mode 100644 index 0000000000..07cba0081a --- /dev/null +++ b/backend/infrahub/graphql/mutations/node_getter/by_id.py @@ -0,0 +1,29 @@ +from typing import Optional + +from graphene import InputObjectType + +from infrahub.core.branch import Branch +from infrahub.core.manager import NodeManager +from infrahub.core.node import Node +from infrahub.core.schema import NodeSchema +from infrahub.database import InfrahubDatabase + +from .interface import MutationNodeGetterInterface + + +class MutationNodeGetterById(MutationNodeGetterInterface): + def __init__(self, db: InfrahubDatabase, node_manager: NodeManager): + self.db = db + self.node_manager = node_manager + + async def get_node( + self, + node_schema: NodeSchema, + data: InputObjectType, + branch: Branch, + at: str, + ) -> Optional[Node]: + node = None + if "id" not in data: + return node + return await self.node_manager.get_one(id=data["id"], db=self.db, at=at, branch=branch, kind=node_schema.kind) diff --git a/backend/infrahub/graphql/mutations/node_getter/interface.py b/backend/infrahub/graphql/mutations/node_getter/interface.py new file mode 100644 index 0000000000..c9be8056d8 --- /dev/null +++ 
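# --- Illustrative sketch (editor's addition, not part of the changeset) -----
# mutate_upsert() consults the node getters in order (by id first, then by the
# schema's default_filter); a hit becomes an update, a miss becomes a create.
# The kind below is hypothetical and assumes default_filter = "name__value".
UPSERT_MUTATION = """
mutation {
    BuiltinTagUpsert(data: {name: {value: "blue"}, description: {value: "refreshed"}}) {
        ok
        object { id }
    }
}
"""
# -----------------------------------------------------------------------------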
b/backend/infrahub/graphql/mutations/node_getter/interface.py @@ -0,0 +1,20 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from graphene import InputObjectType + +from infrahub.core.branch import Branch +from infrahub.core.node import Node +from infrahub.core.schema import NodeSchema + + +class MutationNodeGetterInterface(ABC): + @abstractmethod + async def get_node( + self, + node_schema: NodeSchema, + data: InputObjectType, + branch: Branch, + at: str, + ) -> Optional[Node]: + ... diff --git a/backend/infrahub/graphql/mutations/proposed_change.py b/backend/infrahub/graphql/mutations/proposed_change.py index 2e50b565e4..2a14607364 100644 --- a/backend/infrahub/graphql/mutations/proposed_change.py +++ b/backend/infrahub/graphql/mutations/proposed_change.py @@ -29,14 +29,13 @@ class CheckType(Enum): DATA = "data" REPOSITORY = "repository" SCHEMA = "schema" + USER = "user" ALL = "all" class InfrahubProposedChangeMutation(InfrahubMutationMixin, Mutation): @classmethod - def __init_subclass_with_meta__( - cls, schema: NodeSchema = None, _meta=None, **options - ): # pylint: disable=arguments-differ + def __init_subclass_with_meta__(cls, schema: NodeSchema = None, _meta=None, **options): # pylint: disable=arguments-differ # Make sure schema is a valid NodeSchema Node Class if not isinstance(schema, NodeSchema): raise ValueError(f"You need to pass a valid NodeSchema in '{cls.__name__}.Meta', received '{schema}'") @@ -217,7 +216,7 @@ async def mutate( await rpc_client.send(messages.RequestProposedChangeRefreshArtifacts(proposed_change=proposed_change.id)) elif check_type == CheckType.DATA: await rpc_client.send(messages.RequestProposedChangeDataIntegrity(proposed_change=proposed_change.id)) - elif check_type == CheckType.REPOSITORY: + elif check_type in [CheckType.REPOSITORY, CheckType.USER]: await rpc_client.send(messages.RequestProposedChangeRepositoryChecks(proposed_change=proposed_change.id)) elif check_type == CheckType.SCHEMA: await rpc_client.send(messages.RequestProposedChangeSchemaIntegrity(proposed_change=proposed_change.id)) diff --git a/backend/infrahub/graphql/mutations/repository.py b/backend/infrahub/graphql/mutations/repository.py index ed30281a53..845912f9ec 100644 --- a/backend/infrahub/graphql/mutations/repository.py +++ b/backend/infrahub/graphql/mutations/repository.py @@ -21,9 +21,7 @@ class InfrahubRepositoryMutation(InfrahubMutationMixin, Mutation): @classmethod - def __init_subclass_with_meta__( - cls, schema: NodeSchema = None, _meta=None, **options - ): # pylint: disable=arguments-differ + def __init_subclass_with_meta__(cls, schema: NodeSchema = None, _meta=None, **options): # pylint: disable=arguments-differ # Make sure schema is a valid NodeSchema Node Class if not isinstance(schema, NodeSchema): raise ValueError(f"You need to pass a valid NodeSchema in '{cls.__name__}.Meta', received '{schema}'") diff --git a/backend/infrahub/graphql/mutations/schema.py b/backend/infrahub/graphql/mutations/schema.py new file mode 100644 index 0000000000..cc1a0a4cf2 --- /dev/null +++ b/backend/infrahub/graphql/mutations/schema.py @@ -0,0 +1,247 @@ +from typing import Dict, Union + +from graphene import Boolean, InputObjectType, Mutation, String +from graphql import GraphQLResolveInfo + +from infrahub import config, lock +from infrahub.core import registry +from infrahub.core.branch import Branch +from infrahub.core.constants import RESTRICTED_NAMESPACES +from infrahub.core.manager import NodeManager +from infrahub.core.schema import DropdownChoice, 
GenericSchema, GroupSchema, NodeSchema +from infrahub.database import InfrahubDatabase +from infrahub.exceptions import ValidationError +from infrahub.log import get_logger +from infrahub.message_bus import Meta, messages +from infrahub.services import services +from infrahub.worker import WORKER_IDENTITY + +log = get_logger() + + +class SchemaEnumInput(InputObjectType): + kind = String(required=True) + attribute = String(required=True) + enum = String(required=True) + + +class SchemaDropdownRemoveInput(InputObjectType): + kind = String(required=True) + attribute = String(required=True) + dropdown = String(required=True) + + +class SchemaDropdownAddInput(SchemaDropdownRemoveInput): + color = String(required=False) + description = String(required=False) + label = String(required=False) + + +class SchemaDropdownAdd(Mutation): + class Arguments: + data = SchemaDropdownAddInput(required=True) + + ok = Boolean() + + @classmethod + async def mutate( + cls, + root: dict, # pylint: disable=unused-argument + info: GraphQLResolveInfo, + data: SchemaDropdownAddInput, + ) -> Dict[str, bool]: + db: InfrahubDatabase = info.context.get("infrahub_database") + branch: Branch = info.context.get("infrahub_branch") + kind = registry.get_schema(name=str(data.kind), branch=branch.name) + + attribute = str(data.attribute) + validate_kind_dropdown(kind=kind, attribute=attribute) + dropdown = str(data.dropdown) + color = str(data.color) if data.color else "" + description = str(data.description) if data.description else "" + label = str(data.label) if data.label else "" + choice = DropdownChoice(name=dropdown, color=color, label=label, description=description) + + if found_attribute := [attrib for attrib in kind.attributes if attrib.name == attribute]: + attrib = found_attribute[0] + if [dropdown_entry for dropdown_entry in attrib.choices if dropdown_entry.name == dropdown]: + raise ValidationError( + f"The dropdown value {dropdown} already exists on {kind.kind} in attribute {attribute}" + ) + attrib.choices.append(choice) + + await update_registry(kind=kind, branch=branch, db=db) + + return {"ok": True} + + +class SchemaDropdownRemove(Mutation): + class Arguments: + data = SchemaDropdownRemoveInput(required=True) + + ok = Boolean() + + @classmethod + async def mutate( + cls, + root: dict, # pylint: disable=unused-argument + info: GraphQLResolveInfo, + data: SchemaDropdownRemoveInput, + ) -> Dict[str, bool]: + db: InfrahubDatabase = info.context.get("infrahub_database") + branch: Branch = info.context.get("infrahub_branch") + kind = registry.get_schema(name=str(data.kind), branch=branch.name) + + attribute = str(data.attribute) + dropdown = str(data.dropdown) + validate_kind_dropdown(kind=kind, attribute=attribute) + nodes_with_dropdown = await NodeManager.query( + db=db, schema=kind.kind, filters={f"{attribute}__value": dropdown} + ) + if nodes_with_dropdown: + raise ValidationError(f"There are still {kind.kind} objects using this dropdown") + + if found_attribute := [attrib for attrib in kind.attributes if attrib.name == attribute]: + attrib = found_attribute[0] + if not [dropdown_entry for dropdown_entry in attrib.choices if dropdown_entry.name == dropdown]: + raise ValidationError( + f"The dropdown value {dropdown} does not exist on {kind.kind} in attribute {attribute}" + ) + if len(attrib.choices) == 1: + raise ValidationError(f"Unable to remove the last dropdown on {kind.kind} in attribute {attribute}") + attrib.choices = [entry for entry in attrib.choices if dropdown != entry.name] + + await 
update_registry(kind=kind, branch=branch, db=db) + + return {"ok": True} + + +class SchemaEnumAdd(Mutation): + class Arguments: + data = SchemaEnumInput(required=True) + + ok = Boolean() + + @classmethod + async def mutate( + cls, + root: dict, # pylint: disable=unused-argument + info: GraphQLResolveInfo, + data: SchemaEnumInput, + ) -> Dict[str, bool]: + db: InfrahubDatabase = info.context.get("infrahub_database") + branch: Branch = info.context.get("infrahub_branch") + kind = registry.get_schema(name=str(data.kind), branch=branch.name) + + attribute = str(data.attribute) + enum = str(data.enum) + validate_kind_enum(kind=kind, attribute=attribute) + + for attrib in kind.attributes: + if attribute == attrib.name: + if enum in attrib.enum: + raise ValidationError( + f"The enum value {enum} already exists on {kind.kind} in attribute {attribute}" + ) + attrib.enum.append(enum) + + await update_registry(kind=kind, branch=branch, db=db) + + return {"ok": True} + + +class SchemaEnumRemove(Mutation): + class Arguments: + data = SchemaEnumInput(required=True) + + ok = Boolean() + + @classmethod + async def mutate( + cls, + root: dict, # pylint: disable=unused-argument + info: GraphQLResolveInfo, + data: SchemaEnumInput, + ) -> Dict[str, bool]: + db: InfrahubDatabase = info.context.get("infrahub_database") + branch: Branch = info.context.get("infrahub_branch") + kind = registry.get_schema(name=str(data.kind), branch=branch.name) + + attribute = str(data.attribute) + enum = str(data.enum) + validate_kind_enum(kind=kind, attribute=attribute) + nodes_with_enum = await NodeManager.query(db=db, schema=kind.kind, filters={f"{attribute}__value": enum}) + if nodes_with_enum: + raise ValidationError(f"There are still {kind.kind} objects using this enum") + + for attrib in kind.attributes: + if attribute == attrib.name: + if enum not in attrib.enum: + raise ValidationError( + f"The enum value {enum} does not exist on {kind.kind} in attribute {attribute}" + ) + if len(attrib.enum) == 1: + raise ValidationError(f"Unable to remove the last enum on {kind.kind} in attribute {attribute}") + attrib.enum = [entry for entry in attrib.enum if entry != enum] + + await update_registry(kind=kind, branch=branch, db=db) + + return {"ok": True} + + +def validate_kind_dropdown(kind: Union[GenericSchema, GroupSchema, NodeSchema], attribute: str) -> None: + validate_kind(kind=kind, attribute=attribute) + matching_attribute = [attrib for attrib in kind.attributes if attrib.name == attribute] + if matching_attribute and matching_attribute[0].kind != "Dropdown": + raise ValidationError(f"Attribute {attribute} on {kind.kind} is not a Dropdown") + + +def validate_kind_enum(kind: Union[GenericSchema, GroupSchema, NodeSchema], attribute: str) -> None: + validate_kind(kind=kind, attribute=attribute) + matching_attribute = [attrib for attrib in kind.attributes if attrib.name == attribute] + if not matching_attribute[0].enum: + raise ValidationError(f"Attribute {attribute} on {kind.kind} is not an enum") + + +def validate_kind(kind: Union[GenericSchema, GroupSchema, NodeSchema], attribute: str) -> None: + if isinstance(kind, GroupSchema): + raise ValidationError(f"{kind.kind} is not a valid node") + if kind.namespace in RESTRICTED_NAMESPACES: + raise ValidationError(f"Operation not allowed for {kind.kind} in restricted namespace {kind.namespace}") + if attribute not in kind.attribute_names: + raise ValidationError(f"Attribute {attribute} does not exist on {kind.kind}") + + matching_attribute = [attrib for attrib in kind.attributes if 
attrib.name == attribute] + + if matching_attribute[0].inherited: + raise ValidationError(f"Attribute {attribute} on {kind.kind} is inherited and must be changed on the generic") + + +async def update_registry(kind: NodeSchema, branch: Branch, db: InfrahubDatabase) -> None: + async with lock.registry.global_schema_lock(): + branch_schema = registry.schema.get_schema_branch(name=branch.name) + + # We create a copy of the existing branch schema to do some validation before loading it. + tmp_schema = branch_schema.duplicate() + + tmp_schema.set(name=kind.kind, schema=kind) + tmp_schema.process() + + diff = tmp_schema.diff(branch_schema) + + if diff.all: + log.info(f"Schema has diff, will need to be updated {diff.all}", branch=branch.name) + async with db.start_transaction() as db: + await registry.schema.update_schema_branch( + schema=tmp_schema, db=db, branch=branch.name, limit=diff.all, update_db=True + ) + branch.update_schema_hash() + log.info("Schema has been updated", branch=branch.name, hash=branch.schema_hash.main) + await branch.save(db=db) + + if config.SETTINGS.broker.enable: + message = messages.EventSchemaUpdate( + branch=branch.name, + meta=Meta(initiator_id=WORKER_IDENTITY), + ) + await services.send(message) diff --git a/backend/infrahub/graphql/queries/diff.py b/backend/infrahub/graphql/queries/diff.py index 6c1a183aee..07022bc4bb 100644 --- a/backend/infrahub/graphql/queries/diff.py +++ b/backend/infrahub/graphql/queries/diff.py @@ -1,8 +1,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any, Dict, Optional, Union from typing import List as TypingList -from typing import Optional, Union from graphene import Boolean, Field, List, ObjectType, String diff --git a/backend/infrahub/graphql/schema.py b/backend/infrahub/graphql/schema.py index 2500c447a4..22989d8d07 100644 --- a/backend/infrahub/graphql/schema.py +++ b/backend/infrahub/graphql/schema.py @@ -15,11 +15,16 @@ BranchRebase, BranchUpdate, BranchValidate, + CoreAccountSelfUpdate, CoreAccountTokenCreate, ProposedChangeRequestRefreshArtifacts, ProposedChangeRequestRunCheck, RelationshipAdd, RelationshipRemove, + SchemaDropdownAdd, + SchemaDropdownRemove, + SchemaEnumAdd, + SchemaEnumRemove, ) from .queries import BranchQueryList, DiffSummary from .utils import extract_fields @@ -71,6 +76,7 @@ class InfrahubBaseQuery(ObjectType): class InfrahubBaseMutation(ObjectType): CoreAccountTokenCreate = CoreAccountTokenCreate.Field() + CoreAccountSelfUpdate = CoreAccountSelfUpdate.Field() CoreProposedChangeRunCheck = ProposedChangeRequestRunCheck.Field() CoreProposedChangeRefreshArtifacts = ProposedChangeRequestRefreshArtifacts.Field() @@ -83,3 +89,7 @@ class InfrahubBaseMutation(ObjectType): RelationshipAdd = RelationshipAdd.Field() RelationshipRemove = RelationshipRemove.Field() + SchemaDropdownAdd = SchemaDropdownAdd.Field() + SchemaDropdownRemove = SchemaDropdownRemove.Field() + SchemaEnumAdd = SchemaEnumAdd.Field() + SchemaEnumRemove = SchemaEnumRemove.Field() diff --git a/backend/infrahub/graphql/types/__init__.py b/backend/infrahub/graphql/types/__init__.py index ac7dde56fb..0c98ed2646 100644 --- a/backend/infrahub/graphql/types/__init__.py +++ b/backend/infrahub/graphql/types/__init__.py @@ -8,7 +8,10 @@ BaseAttribute, BoolAttributeType, CheckboxAttributeType, + DropdownType, IntAttributeType, + IPHostType, + IPNetworkType, JSONAttributeType, ListAttributeType, NumberAttributeType, @@ -27,6 +30,9 @@ "RelatedNodeInput", "AttributeInterface", 
"BaseAttribute", + "DropdownType", + "IPHostType", + "IPNetworkType", "TextAttributeType", "NumberAttributeType", "CheckboxAttributeType", diff --git a/backend/infrahub/graphql/types/attribute.py b/backend/infrahub/graphql/types/attribute.py index 8bd13df76b..2ffecc5387 100644 --- a/backend/infrahub/graphql/types/attribute.py +++ b/backend/infrahub/graphql/types/attribute.py @@ -45,6 +45,51 @@ class Meta: interfaces = {AttributeInterface} +class DropdownType(BaseAttribute): + value = Field(String) + label = Field(String) + color = Field(String) + description = Field(String) + + class Meta: + description = "Attribute of type Dropdown" + name = "Dropdown" + interfaces = {AttributeInterface} + + +class IPHostType(BaseAttribute): + value = Field(String) + ip = Field(String) + hostmask = Field(String) + netmask = Field(String) + prefixlen = Field(String) + version = Field(Int) + with_hostmask = Field(String) + with_netmask = Field(String) + + class Meta: + description = "Attribute of type Text" + name = "IPHost" + interfaces = {AttributeInterface} + + +class IPNetworkType(BaseAttribute): + value = Field(String) + broadcast_address = Field(String) + hostmask = Field(String) + netmask = Field(String) + prefixlen = Field(String) + num_addresses = Field(Int) + version = Field(Int) + with_hostmask = Field(String) + with_netmask = Field(String) + + class Meta: + description = "Attribute of type Text" + name = "IPNetwork" + interfaces = {AttributeInterface} + + class NumberAttributeType(BaseAttribute): value = Field(Int) diff --git a/backend/infrahub/graphql/types/node.py b/backend/infrahub/graphql/types/node.py index bfab303e0b..5673b83615 100644 --- a/backend/infrahub/graphql/types/node.py +++ b/backend/infrahub/graphql/types/node.py @@ -14,9 +14,7 @@ class InfrahubObjectOptions(ObjectTypeOptions): class InfrahubObject(ObjectType, GetListMixin): @classmethod - def __init_subclass_with_meta__( - cls, schema: NodeSchema = None, interfaces=(), _meta=None, **options - ): # pylint: disable=arguments-differ + def __init_subclass_with_meta__(cls, schema: NodeSchema = None, interfaces=(), _meta=None, **options): # pylint: disable=arguments-differ if not isinstance(schema, (NodeSchema, GenericSchema)): raise ValueError(f"You need to pass a valid NodeSchema in '{cls.__name__}.Meta', received '{schema}'") diff --git a/backend/infrahub/graphql/types/union.py b/backend/infrahub/graphql/types/union.py index 131bd38e36..ead49fc736 100644 --- a/backend/infrahub/graphql/types/union.py +++ b/backend/infrahub/graphql/types/union.py @@ -19,9 +19,7 @@ class Meta: types = ("PlaceHolder",) @classmethod - def __init_subclass_with_meta__( - cls, schema: GroupSchema = None, types=(), _meta=None, **options - ): # pylint: disable=arguments-renamed + def __init_subclass_with_meta__(cls, schema: GroupSchema = None, types=(), _meta=None, **options): # pylint: disable=arguments-renamed if not isinstance(schema, GroupSchema): raise ValueError(f"You need to pass a valid GroupSchema in '{cls.__name__}.Meta', received '{schema}'") diff --git a/backend/infrahub/graphql/utils.py b/backend/infrahub/graphql/utils.py index 5f42dffaa9..de4616ea53 100644 --- a/backend/infrahub/graphql/utils.py +++ b/backend/infrahub/graphql/utils.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Dict, List, Set, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Union from graphene.types.definitions import GrapheneInterfaceType, GrapheneObjectType from graphql import ( # pylint: 
disable=no-name-in-module @@ -36,7 +36,7 @@ def calculate_dict_height(data: dict, cnt: int = 0) -> int: return cnt -async def extract_fields(selection_set: SelectionSetNode) -> Dict[str, Dict]: +async def extract_fields(selection_set: SelectionSetNode) -> Optional[Dict[str, Dict]]: """This function extract all the requested fields in a tree of Dict from a SelectionSetNode The goal of this function is to limit the fields that we need to query from the backend. @@ -270,7 +270,7 @@ def print_selection_set(selection_set: SelectionSetNode, level: int = 1) -> int: # print(f"in print_selection_set loop {field}") # The field we are at is already a lever deeper, even if it doesn't have its own selection set. # max_depth = max(max_depth, level + 1) - print(f"{level*tab}{field.name.value}") + print(f"{level * tab}{field.name.value}") if selection_set := getattr(field, "selection_set", None): # max_depth = max(max_depth, self._get_query_depth(selection_set, level + 1)) print_selection_set(selection_set, level + 1) diff --git a/backend/infrahub/message_bus/__init__.py b/backend/infrahub/message_bus/__init__.py index 1b9579164c..46286c1b39 100644 --- a/backend/infrahub/message_bus/__init__.py +++ b/backend/infrahub/message_bus/__init__.py @@ -61,6 +61,7 @@ class Meta(BaseModel): default=None, description="Validator execution ID related to this message" ) check_execution_id: Optional[str] = Field(default=None, description="Check execution ID related to this message") + priority: int = Field(default=3, description="Message Priority") class InfrahubMessage(BaseModel, aio_pika.abc.AbstractMessage): @@ -80,6 +81,10 @@ def assign_header(self, key: str, value: Any) -> None: self.meta.headers = self.meta.headers or {} self.meta.headers[key] = value + def assign_priority(self, priority: int) -> None: + self.meta = self.meta or Meta() + self.meta.priority = priority + def set_log_data(self, routing_key: str) -> None: set_log_data(key="routing_key", value=routing_key) if self.meta: @@ -97,7 +102,7 @@ def reply_requested(self) -> bool: @property def body(self) -> bytes: - return self.json(exclude={"meta": {"headers"}, "value": True}, exclude_none=True).encode("UTF-8") + return self.json(exclude={"meta": {"headers", "priority"}, "value": True}, exclude_none=True).encode("UTF-8") @property def locked(self) -> bool: @@ -107,11 +112,17 @@ def locked(self) -> bool: def properties(self) -> aiormq.spec.Basic.Properties: correlation_id = None headers = None + priority = 3 if self.meta: correlation_id = self.meta.correlation_id headers = self.meta.headers + priority = self.meta.priority return aiormq.spec.Basic.Properties( - content_type="application/json", content_encoding="utf-8", correlation_id=correlation_id, headers=headers + content_type="application/json", + content_encoding="utf-8", + correlation_id=correlation_id, + headers=headers, + priority=priority, ) def increase_retry_count(self, count: int = 1) -> None: diff --git a/backend/infrahub/message_bus/messages/__init__.py b/backend/infrahub/message_bus/messages/__init__.py index 0f82871f34..e3bdc46ef7 100644 --- a/backend/infrahub/message_bus/messages/__init__.py +++ b/backend/infrahub/message_bus/messages/__init__.py @@ -5,6 +5,7 @@ from .check_artifact_create import CheckArtifactCreate from .check_repository_checkdefinition import CheckRepositoryCheckDefinition from .check_repository_mergeconflicts import CheckRepositoryMergeConflicts +from .check_repository_usercheck import CheckRepositoryUserCheck from .event_branch_create import EventBranchCreate from 
.event_branch_delete import EventBranchDelete from .event_branch_merge import EventBranchMerge @@ -21,23 +22,24 @@ from .request_artifactdefinition_check import RequestArtifactDefinitionCheck from .request_artifactdefinition_generate import RequestArtifactDefinitionGenerate from .request_git_createbranch import RequestGitCreateBranch +from .request_git_sync import RequestGitSync +from .request_proposed_change_cancel import RequestProposedChangeCancel from .request_proposedchange_dataintegrity import RequestProposedChangeDataIntegrity -from .request_proposedchange_refreshartifacts import ( - RequestProposedChangeRefreshArtifacts, -) -from .request_proposedchange_repositorychecks import ( - RequestProposedChangeRepositoryChecks, -) +from .request_proposedchange_refreshartifacts import RequestProposedChangeRefreshArtifacts +from .request_proposedchange_repositorychecks import RequestProposedChangeRepositoryChecks from .request_proposedchange_schemaintegrity import RequestProposedChangeSchemaIntegrity from .request_repository_checks import RequestRepositoryChecks +from .request_repository_userchecks import RequestRepositoryUserChecks from .transform_jinja_template import TransformJinjaTemplate from .transform_python_data import TransformPythonData from .trigger_artifact_definition_generate import TriggerArtifactDefinitionGenerate +from .trigger_proposed_change_cancel import TriggerProposedChangeCancel MESSAGE_MAP: Dict[str, Type[InfrahubMessage]] = { "check.artifact.create": CheckArtifactCreate, "check.repository.check_definition": CheckRepositoryCheckDefinition, "check.repository.merge_conflicts": CheckRepositoryMergeConflicts, + "check.repository.user_check": CheckRepositoryUserCheck, "event.branch.create": EventBranchCreate, "event.branch.delete": EventBranchDelete, "event.branch.merge": EventBranchMerge, @@ -49,22 +51,47 @@ "git.file.get": GitFileGet, "git.repository.add": GitRepositoryAdd, "git.repository.merge": GitRepositoryMerge, - "request.git.create_branch": RequestGitCreateBranch, "refresh.registry.branches": RefreshRegistryBranches, "request.artifact.generate": RequestArtifactGenerate, "request.artifact_definition.check": RequestArtifactDefinitionCheck, "request.artifact_definition.generate": RequestArtifactDefinitionGenerate, + "request.git.create_branch": RequestGitCreateBranch, + "request.git.sync": RequestGitSync, + "request.proposed_change.cancel": RequestProposedChangeCancel, "request.proposed_change.data_integrity": RequestProposedChangeDataIntegrity, "request.proposed_change.refresh_artifacts": RequestProposedChangeRefreshArtifacts, "request.proposed_change.repository_checks": RequestProposedChangeRepositoryChecks, "request.proposed_change.schema_integrity": RequestProposedChangeSchemaIntegrity, "request.repository.checks": RequestRepositoryChecks, + "request.repository.user_checks": RequestRepositoryUserChecks, "transform.jinja.template": TransformJinjaTemplate, "transform.python.data": TransformPythonData, "trigger.artifact_definition.generate": TriggerArtifactDefinitionGenerate, + "trigger.proposed_change.cancel": TriggerProposedChangeCancel, } +def message_priority(routing_key: str) -> int: + PRIORITY_MAP = { + "check.artifact.create": 2, + "check.repository.check_definition": 2, + "check.repository.merge_conflicts": 2, + "event.branch.create": 5, + "event.branch.delete": 5, + "event.branch.merge": 5, + "event.schema.update": 5, + "git.diff.names_only": 4, + "git.file.get": 4, + "request.artifact.generate": 2, + "request.git.sync": 4, + 
"request.proposed_change.repository_checks": 5, + "transform.jinja.template": 4, + "transform.python.data": 4, + } + + return PRIORITY_MAP.get(routing_key, 3) + + ROUTING_KEY_MAP: Dict[Type[InfrahubMessage], str] = { message: routing_key for routing_key, message in MESSAGE_MAP.items() } diff --git a/backend/infrahub/message_bus/messages/check_repository_checkdefinition.py b/backend/infrahub/message_bus/messages/check_repository_checkdefinition.py index 45d4f68f15..e4f2dcdd9a 100644 --- a/backend/infrahub/message_bus/messages/check_repository_checkdefinition.py +++ b/backend/infrahub/message_bus/messages/check_repository_checkdefinition.py @@ -4,14 +4,13 @@ class CheckRepositoryCheckDefinition(InfrahubMessage): - """Runs a check as defined within a CoreCheckDefinition within a repository.""" + """Triggers user defined checks to run based on a Check Definition.""" - validator_id: str = Field(..., description="The id of the validator associated with this check") - validator_execution_id: str = Field(..., description="The id of current execution of the associated validator") - check_execution_id: str = Field(..., description="The unique ID for the current execution of this check") + check_definition_id: str = Field(..., description="The unique ID of the check definition") commit: str = Field(..., description="The commit to target") repository_id: str = Field(..., description="The unique ID of the Repository") repository_name: str = Field(..., description="The name of the Repository") branch_name: str = Field(..., description="The branch where the check is run") file_path: str = Field(..., description="The path and filename of the check") class_name: str = Field(..., description="The name of the class containing the check") + proposed_change: str = Field(..., description="The unique ID of the Proposed Change") diff --git a/backend/infrahub/message_bus/messages/check_repository_usercheck.py b/backend/infrahub/message_bus/messages/check_repository_usercheck.py new file mode 100644 index 0000000000..c5c6552d38 --- /dev/null +++ b/backend/infrahub/message_bus/messages/check_repository_usercheck.py @@ -0,0 +1,21 @@ +from pydantic import Field + +from infrahub.message_bus import InfrahubMessage + + +class CheckRepositoryUserCheck(InfrahubMessage): + """Runs a check as defined within a CoreCheckDefinition within a repository.""" + + validator_id: str = Field(..., description="The id of the validator associated with this check") + validator_execution_id: str = Field(..., description="The id of current execution of the associated validator") + check_execution_id: str = Field(..., description="The unique ID for the current execution of this check") + check_definition_id: str = Field(..., description="The unique ID of the check definition") + commit: str = Field(..., description="The commit to target") + repository_id: str = Field(..., description="The unique ID of the Repository") + repository_name: str = Field(..., description="The name of the Repository") + branch_name: str = Field(..., description="The branch where the check is run") + file_path: str = Field(..., description="The path and filename of the check") + class_name: str = Field(..., description="The name of the class containing the check") + proposed_change: str = Field(..., description="The unique ID of the Proposed Change") + variables: dict = Field(default_factory=dict, description="Input variables when running the check") + name: str = Field(..., description="The name of the check") diff --git 
diff --git a/backend/infrahub/message_bus/messages/request_git_sync.py b/backend/infrahub/message_bus/messages/request_git_sync.py new file mode 100644 index 0000000000..ae31c665db --- /dev/null +++ b/backend/infrahub/message_bus/messages/request_git_sync.py @@ -0,0 +1,5 @@ +from infrahub.message_bus import InfrahubMessage + + +class RequestGitSync(InfrahubMessage): + """Request remote repositories to be synced.""" diff --git a/backend/infrahub/message_bus/messages/request_proposed_change_cancel.py b/backend/infrahub/message_bus/messages/request_proposed_change_cancel.py new file mode 100644 index 0000000000..bb54444cfd --- /dev/null +++ b/backend/infrahub/message_bus/messages/request_proposed_change_cancel.py @@ -0,0 +1,9 @@ +from pydantic import Field + +from infrahub.message_bus import InfrahubMessage + + +class RequestProposedChangeCancel(InfrahubMessage): + """Cancel the proposed change.""" + + proposed_change: str = Field(..., description="The unique ID of the Proposed Change") diff --git a/backend/infrahub/message_bus/messages/request_repository_userchecks.py b/backend/infrahub/message_bus/messages/request_repository_userchecks.py new file mode 100644 index 0000000000..30cc8f4baa --- /dev/null +++ b/backend/infrahub/message_bus/messages/request_repository_userchecks.py @@ -0,0 +1,12 @@ +from pydantic import Field + +from infrahub.message_bus import InfrahubMessage + + +class RequestRepositoryUserChecks(InfrahubMessage): + """Sent to trigger the user-defined checks on a repository.""" + + proposed_change: str = Field(..., description="The unique ID of the Proposed Change") + repository: str = Field(..., description="The unique ID of the Repository") + source_branch: str = Field(..., description="The source branch") + target_branch: str = Field(..., description="The target branch") diff --git a/backend/infrahub/message_bus/messages/trigger_proposed_change_cancel.py b/backend/infrahub/message_bus/messages/trigger_proposed_change_cancel.py new file mode 100644 index 0000000000..3ce54d2b7d --- /dev/null +++ b/backend/infrahub/message_bus/messages/trigger_proposed_change_cancel.py @@ -0,0 +1,9 @@ +from pydantic import Field + +from infrahub.message_bus import InfrahubMessage + + +class TriggerProposedChangeCancel(InfrahubMessage): + """Triggers a request to cancel any open or closed proposed changes for a given branch.""" + + branch: str = Field(..., description="The impacted branch") diff --git a/backend/infrahub/message_bus/operations/__init__.py b/backend/infrahub/message_bus/operations/__init__.py index 4b66afaf0d..d8a3f6a625 100644 --- a/backend/infrahub/message_bus/operations/__init__.py +++ b/backend/infrahub/message_bus/operations/__init__.py @@ -2,16 +2,7 @@ from infrahub.log import get_logger from infrahub.message_bus import InfrahubResponse, messages -from infrahub.message_bus.operations import ( - check, - event, - finalize, - git, - refresh, - requests, - transform, - trigger, -) +from infrahub.message_bus.operations import check, event, finalize, git, refresh, requests, transform, trigger from infrahub.message_bus.types import MessageTTL from infrahub.services import InfrahubServices from infrahub.tasks.check import set_check_status @@ -22,6 +13,7 @@ "check.artifact.create": check.artifact.create, "check.repository.check_definition": check.repository.check_definition, "check.repository.merge_conflicts": check.repository.merge_conflicts, + "check.repository.user_check": check.repository.user_check, "event.branch.create": event.branch.create, "event.branch.delete": event.branch.delete,
"event.branch.merge": event.branch.merge, @@ -35,17 +27,21 @@ "git.repository.merge": git.repository.merge, "refresh.registry.branches": refresh.registry.branches, "request.git.create_branch": requests.git.create_branch, + "request.git.sync": requests.git.sync, "request.artifact.generate": requests.artifact.generate, "request.artifact_definition.check": requests.artifact_definition.check, "request.artifact_definition.generate": requests.artifact_definition.generate, + "request.proposed_change.cancel": requests.proposed_change.cancel, "request.proposed_change.data_integrity": requests.proposed_change.data_integrity, "request.proposed_change.refresh_artifacts": requests.proposed_change.refresh_artifacts, "request.proposed_change.repository_checks": requests.proposed_change.repository_checks, "request.proposed_change.schema_integrity": requests.proposed_change.schema_integrity, - "request.repository.checks": requests.repository.check, + "request.repository.checks": requests.repository.checks, + "request.repository.user_checks": requests.repository.user_checks, "transform.jinja.template": transform.jinja.template, "transform.python.data": transform.python.data, "trigger.artifact_definition.generate": trigger.artifact_definition.generate, + "trigger.proposed_change.cancel": trigger.proposed_change.cancel, } diff --git a/backend/infrahub/message_bus/operations/check/repository.py b/backend/infrahub/message_bus/operations/check/repository.py index e69d4d857d..f15abdc10f 100644 --- a/backend/infrahub/message_bus/operations/check/repository.py +++ b/backend/infrahub/message_bus/operations/check/repository.py @@ -1,78 +1,122 @@ +from typing import List + +from infrahub_sdk import UUIDT + from infrahub import lock from infrahub.core.timestamp import Timestamp from infrahub.exceptions import CheckError from infrahub.git.repository import InfrahubRepository from infrahub.log import get_logger -from infrahub.message_bus import messages +from infrahub.message_bus import InfrahubMessage, messages from infrahub.services import InfrahubServices log = get_logger() async def check_definition(message: messages.CheckRepositoryCheckDefinition, service: InfrahubServices): - validator = await service.client.get(kind="CoreRepositoryValidator", id=message.validator_id) - await validator.checks.fetch() + definition = await service.client.get(kind="CoreCheckDefinition", id=message.check_definition_id) - repo = await InfrahubRepository.init(id=message.repository_id, name=message.repository_name) - conclusion = "failure" - severity = "critical" - log_entries = "" - try: - check_run = await repo.execute_python_check( - branch_name=message.branch_name, - location=message.file_path, - class_name=message.class_name, - client=service.client, - commit=message.commit, - ) - if check_run.passed: - conclusion = "success" - severity = "info" - log.info("The check passed", check_execution_id=message.check_execution_id) - else: - log.warning("The check reported failures", check_execution_id=message.check_execution_id) - log_entries = check_run.log_entries - except CheckError as exc: - log.warning("The check failed to run", check_execution_id=message.check_execution_id) - log_entries = f"FATAL Error/n:{exc.message}" + proposed_change = await service.client.get(kind="CoreProposedChange", id=message.proposed_change) + validator_execution_id = str(UUIDT()) + check_execution_ids: List[str] = [] + await proposed_change.validations.fetch() + validator = None + events: List[InfrahubMessage] = [] + + for relationship in 
proposed_change.validations.peers: + existing_validator = relationship.peer - check = None - for relationship in validator.checks.peers: - existing_check = relationship.peer if ( - existing_check.typename == "CoreStandardCheck" - and existing_check.kind.value == "CheckDefinition" - and existing_check.name.value == message.class_name + existing_validator.typename == "CoreUserValidator" + and existing_validator.repository.id == message.repository_id + and existing_validator.check_definition.id == message.check_definition_id ): - check = existing_check + validator = existing_validator + service.log.info("Found the same validator", validator=validator) - if check: - check.created_at.value = Timestamp().to_string() - check.message.value = log_entries - check.conclusion.value = conclusion - check.severity.value = severity - await check.save() + if validator: + validator.conclusion.value = "unknown" + validator.state.value = "queued" + validator.started_at.value = "" + validator.completed_at.value = "" + await validator.save() else: - check = await service.client.create( - kind="CoreStandardCheck", + validator = await service.client.create( + kind="CoreUserValidator", data={ - "name": message.class_name, - "origin": message.repository_id, - "kind": "CheckDefinition", - "validator": message.validator_id, - "created_at": Timestamp().to_string(), - "message": log_entries, - "conclusion": conclusion, - "severity": severity, + "label": f"Check: {definition.name.value}", + "proposed_change": message.proposed_change, + "repository": message.repository_id, + "check_definition": message.check_definition_id, }, ) - await check.save() + await validator.save() + + if definition.targets.id: + # Check against a group of targets + await definition.targets.fetch() + group = definition.targets.peer + await group.members.fetch() + for relationship in group.members.peers: + member = relationship.peer + + check_execution_id = str(UUIDT()) + check_execution_ids.append(check_execution_id) + events.append( + messages.CheckRepositoryUserCheck( + name=member.display_label, + validator_id=validator.id, + validator_execution_id=validator_execution_id, + check_execution_id=check_execution_id, + repository_id=message.repository_id, + repository_name=message.repository_name, + commit=message.commit, + file_path=message.file_path, + class_name=message.class_name, + branch_name=message.branch_name, + check_definition_id=message.check_definition_id, + proposed_change=message.proposed_change, + variables=member.extract(params=definition.parameters.value), + ) + ) + else: + check_execution_id = str(UUIDT()) + check_execution_ids.append(check_execution_id) + events.append( + messages.CheckRepositoryUserCheck( + name=definition.name.value, + validator_id=validator.id, + validator_execution_id=validator_execution_id, + check_execution_id=check_execution_id, + repository_id=message.repository_id, + repository_name=message.repository_name, + commit=message.commit, + file_path=message.file_path, + class_name=message.class_name, + branch_name=message.branch_name, + check_definition_id=message.check_definition_id, + proposed_change=message.proposed_change, + ) + ) + + checks_in_execution = ",".join(check_execution_ids) + log.info("Checks in execution", checks=checks_in_execution) await service.cache.set( - key=f"validator_execution_id:{message.validator_execution_id}:check_execution_id:{message.check_execution_id}", - value=conclusion, - expires=7200, + key=f"validator_execution_id:{validator_execution_id}:checks", 
value=checks_in_execution, expires=7200 ) + events.append( + messages.FinalizeValidatorExecution( + start_time=Timestamp().to_string(), + validator_id=validator.id, + validator_execution_id=validator_execution_id, + validator_type="CoreUserValidator", + ) + ) + + for event in events: + event.assign_meta(parent=message) + await service.send(message=event) async def merge_conflicts(message: messages.CheckRepositoryMergeConflicts, service: InfrahubServices): @@ -155,3 +199,70 @@ async def merge_conflicts(message: messages.CheckRepositoryMergeConflicts, servi value=validator_conclusion, expires=7200, ) + + +async def user_check(message: messages.CheckRepositoryUserCheck, service: InfrahubServices): + validator = await service.client.get(kind="CoreUserValidator", id=message.validator_id) + await validator.checks.fetch() + + repo = await InfrahubRepository.init(id=message.repository_id, name=message.repository_name) + conclusion = "failure" + severity = "critical" + log_entries = "" + try: + check_run = await repo.execute_python_check( + branch_name=message.branch_name, + location=message.file_path, + class_name=message.class_name, + client=service.client, + commit=message.commit, + params=message.variables, + ) + if check_run.passed: + conclusion = "success" + severity = "info" + log.info("The check passed", check_execution_id=message.check_execution_id) + else: + log.warning("The check reported failures", check_execution_id=message.check_execution_id) + log_entries = check_run.log_entries + except CheckError as exc: + log.warning("The check failed to run", check_execution_id=message.check_execution_id) + log_entries = f"FATAL Error:\n{exc.message}" + + check = None + for relationship in validator.checks.peers: + existing_check = relationship.peer + if ( + existing_check.typename == "CoreStandardCheck" + and existing_check.kind.value == "CheckDefinition" + and existing_check.name.value == message.name + ): + check = existing_check + + if check: + check.created_at.value = Timestamp().to_string() + check.message.value = log_entries + check.conclusion.value = conclusion + check.severity.value = severity + await check.save() + else: + check = await service.client.create( + kind="CoreStandardCheck", + data={ + "name": message.name, + "origin": message.repository_id, + "kind": "CheckDefinition", + "validator": message.validator_id, + "created_at": Timestamp().to_string(), + "message": log_entries, + "conclusion": conclusion, + "severity": severity, + }, + ) + await check.save() + + await service.cache.set( + key=f"validator_execution_id:{message.validator_execution_id}:check_execution_id:{message.check_execution_id}", + value="success", + expires=7200, + ) diff --git a/backend/infrahub/message_bus/operations/event/branch.py b/backend/infrahub/message_bus/operations/event/branch.py index c66a1c6afd..1ba051d490 100644 --- a/backend/infrahub/message_bus/operations/event/branch.py +++ b/backend/infrahub/message_bus/operations/event/branch.py @@ -22,7 +22,10 @@ async def create(message: messages.EventBranchCreate, service: InfrahubServices) async def delete(message: messages.EventBranchDelete, service: InfrahubServices) -> None: log.info("Branch was deleted", branch=message.branch) - events: List[InfrahubMessage] = [messages.RefreshRegistryBranches()] + events: List[InfrahubMessage] = [ + messages.RefreshRegistryBranches(), + messages.TriggerProposedChangeCancel(branch=message.branch), + ] for event in events: event.assign_meta(parent=message)
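The cache writes in `check_definition` and `user_check` form a small completion protocol: the dispatcher stores the comma-separated list of expected check execution IDs under the validator execution ID, each check writes its own per-execution key when it finishes, and the `FinalizeValidatorExecution` task can compare the two sets. A minimal sketch of that bookkeeping, with a plain dict standing in for `service.cache` (the real adapter is Redis-backed and the entries expire after 7200 seconds):

```python
# Sketch of the validator/check bookkeeping, with a dict standing in for service.cache.
from typing import Dict, List

cache: Dict[str, str] = {}

validator_execution_id = "exec-1"
check_execution_ids: List[str] = ["check-a", "check-b"]

# Dispatcher side: record which check executions belong to this validator run.
cache[f"validator_execution_id:{validator_execution_id}:checks"] = ",".join(check_execution_ids)

# Worker side: each check marks its own execution once its result has been saved.
for check_id in check_execution_ids:
    cache[f"validator_execution_id:{validator_execution_id}:check_execution_id:{check_id}"] = "success"

# Finalize side: the validator run is complete once every listed check has reported back.
expected = cache[f"validator_execution_id:{validator_execution_id}:checks"].split(",")
complete = all(
    f"validator_execution_id:{validator_execution_id}:check_execution_id:{check_id}" in cache
    for check_id in expected
)
assert complete
```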
diff --git a/backend/infrahub/message_bus/operations/event/node.py b/backend/infrahub/message_bus/operations/event/node.py index a8fa02fa3c..60573179c1 100644 --- a/backend/infrahub/message_bus/operations/event/node.py +++ b/backend/infrahub/message_bus/operations/event/node.py @@ -6,7 +6,8 @@ async def mutated( - message: messages.EventNodeMutated, service: InfrahubServices # pylint: disable=unused-argument + message: messages.EventNodeMutated, + service: InfrahubServices, # pylint: disable=unused-argument ) -> None: log.debug( "Mutation on node", diff --git a/backend/infrahub/message_bus/operations/requests/git.py b/backend/infrahub/message_bus/operations/requests/git.py index e07e42775c..854e69a59e 100644 --- a/backend/infrahub/message_bus/operations/requests/git.py +++ b/backend/infrahub/message_bus/operations/requests/git.py @@ -1,5 +1,6 @@ from typing import List +from infrahub.git.actions import sync_remote_repositories from infrahub.log import get_logger from infrahub.message_bus import messages from infrahub.services import InfrahubServices @@ -7,7 +8,7 @@ log = get_logger() -async def create_branch(message: messages.RequestGitCreateBranch, service: InfrahubServices): +async def create_branch(message: messages.RequestGitCreateBranch, service: InfrahubServices) -> None: """Request the creation of git branches in available repositories.""" log.info("Querying repositories for branch creation") repositories = await service.client.filters(kind="CoreRepository") @@ -24,3 +25,11 @@ async def create_branch(message: messages.RequestGitCreateBranch, service: Infra for event in events: event.assign_meta(parent=message) await service.send(message=event) + + +async def sync( + message: messages.RequestGitSync, # pylint: disable=unused-argument + service: InfrahubServices, +) -> None: + """Sync remote repositories.""" + await sync_remote_repositories(service) diff --git a/backend/infrahub/message_bus/operations/requests/proposed_change.py b/backend/infrahub/message_bus/operations/requests/proposed_change.py index 688c905bcd..58519983bd 100644 --- a/backend/infrahub/message_bus/operations/requests/proposed_change.py +++ b/backend/infrahub/message_bus/operations/requests/proposed_change.py @@ -1,6 +1,7 @@ from typing import List from infrahub.core.branch import ObjectConflict +from infrahub.core.constants import ProposedChangeState from infrahub.core.manager import NodeManager from infrahub.core.node import Node from infrahub.core.registry import registry @@ -8,7 +9,7 @@ from infrahub.core.timestamp import Timestamp from infrahub.database import InfrahubDatabase from infrahub.log import get_logger -from infrahub.message_bus import messages +from infrahub.message_bus import InfrahubMessage, messages from infrahub.services import InfrahubServices log = get_logger() @@ -39,6 +40,14 @@ async def _get_conflicts(db: InfrahubDatabase, proposed_change: Node) -> List[Ob return await diff.get_conflicts_graph(db=db) +async def cancel(message: messages.RequestProposedChangeCancel, service: InfrahubServices) -> None: + """Cancel a proposed change.""" + log.info("Cancelling proposed change", id=message.proposed_change) + proposed_change = await service.client.get(kind="CoreProposedChange", id=message.proposed_change) + proposed_change.state.value = ProposedChangeState.CANCELED.value + await proposed_change.save() + + async def data_integrity(message: messages.RequestProposedChangeDataIntegrity, service: InfrahubServices) -> None: """Triggers a data integrity validation check on the provided proposed change to
start.""" log.info(f"Got a request to process data integrity defined in proposed_change: {message.proposed_change}") @@ -134,7 +143,8 @@ async def data_integrity(message: messages.RequestProposedChangeDataIntegrity, s async def schema_integrity( - message: messages.RequestProposedChangeSchemaIntegrity, service: InfrahubServices # pylint: disable=unused-argument + message: messages.RequestProposedChangeSchemaIntegrity, + service: InfrahubServices, # pylint: disable=unused-argument ) -> None: log.info(f"Got a request to process schema integrity defined in proposed_change: {message.proposed_change}") @@ -144,15 +154,27 @@ async def repository_checks(message: messages.RequestProposedChangeRepositoryChe change_proposal = await service.client.get(kind="CoreProposedChange", id=message.proposed_change) repositories = await service.client.all(kind="CoreRepository", branch=change_proposal.source_branch.value) + events: List[InfrahubMessage] = [] for repository in repositories: - msg = messages.RequestRepositoryChecks( - proposed_change=message.proposed_change, - repository=repository.id, - source_branch=change_proposal.source_branch.value, - target_branch=change_proposal.destination_branch.value, + events.append( + messages.RequestRepositoryChecks( + proposed_change=message.proposed_change, + repository=repository.id, + source_branch=change_proposal.source_branch.value, + target_branch=change_proposal.destination_branch.value, + ) ) - msg.assign_meta(parent=message) - await service.send(message=msg) + events.append( + messages.RequestRepositoryUserChecks( + proposed_change=message.proposed_change, + repository=repository.id, + source_branch=change_proposal.source_branch.value, + target_branch=change_proposal.destination_branch.value, + ) + ) + for event in events: + event.assign_meta(parent=message) + await service.send(message=event) async def refresh_artifacts(message: messages.RequestProposedChangeRefreshArtifacts, service: InfrahubServices) -> None: diff --git a/backend/infrahub/message_bus/operations/requests/repository.py b/backend/infrahub/message_bus/operations/requests/repository.py index 7bdb68c95b..065c21c5cf 100644 --- a/backend/infrahub/message_bus/operations/requests/repository.py +++ b/backend/infrahub/message_bus/operations/requests/repository.py @@ -10,13 +10,19 @@ log = get_logger() -async def check(message: messages.RequestRepositoryChecks, service: InfrahubServices): +async def checks(message: messages.RequestRepositoryChecks, service: InfrahubServices): """Request to start validation checks on a specific repository.""" log.info("Running repository checks", repository_id=message.repository, proposed_change_id=message.proposed_change) + + source_branch = await service.client.branch.get(branch_name=message.source_branch) + if source_branch.is_data_only: + return + events: List[InfrahubMessage] = [] + repository = await service.client.get(kind="CoreRepository", id=message.repository, branch=message.source_branch) proposed_change = await service.client.get(kind="CoreProposedChange", id=message.proposed_change) - source_branch = await service.client.branch.get(branch_name=message.source_branch) + validator_execution_id = str(UUIDT()) check_execution_ids: List[str] = [] await proposed_change.validations.fetch() @@ -50,42 +56,22 @@ async def check(message: messages.RequestRepositoryChecks, service: InfrahubServ ) await validator.save() - if not source_branch.is_data_only: - check_execution_id = str(UUIDT()) - check_execution_ids.append(check_execution_id) - log.info("Adding check for 
merge conflict") - - events.append( - messages.CheckRepositoryMergeConflicts( - validator_id=validator.id, - validator_execution_id=validator_execution_id, - check_execution_id=check_execution_id, - proposed_change=message.proposed_change, - repository_id=message.repository, - repository_name=repository.name.value, - source_branch=message.source_branch, - target_branch=message.target_branch, - ) - ) + check_execution_id = str(UUIDT()) + check_execution_ids.append(check_execution_id) + log.info("Adding check for merge conflict") - for relationship in repository.checks.peers: - log.info("Adding check for user defined check") - check_definition = relationship.peer - check_execution_id = str(UUIDT()) - check_execution_ids.append(check_execution_id) - events.append( - messages.CheckRepositoryCheckDefinition( - validator_id=validator.id, - validator_execution_id=validator_execution_id, - check_execution_id=check_execution_id, - repository_id=repository.id, - repository_name=repository.name.value, - commit=repository.commit.value, - file_path=check_definition.file_path.value, - class_name=check_definition.class_name.value, - branch_name=message.source_branch, - ) + events.append( + messages.CheckRepositoryMergeConflicts( + validator_id=validator.id, + validator_execution_id=validator_execution_id, + check_execution_id=check_execution_id, + proposed_change=message.proposed_change, + repository_id=message.repository, + repository_name=repository.name.value, + source_branch=message.source_branch, + target_branch=message.target_branch, ) + ) checks_in_execution = ",".join(check_execution_ids) log.info("Checks in execution", checks=checks_in_execution) @@ -104,3 +90,40 @@ async def check(message: messages.RequestRepositoryChecks, service: InfrahubServ for event in events: event.assign_meta(parent=message) await service.send(message=event) + + +async def user_checks(message: messages.RequestRepositoryUserChecks, service: InfrahubServices): + """Request to start validation checks on a specific repositor for User defined checks.""" + log.info( + "Running user defined checks checks", + repository_id=message.repository, + proposed_change_id=message.proposed_change, + ) + events: List[InfrahubMessage] = [] + + repository = await service.client.get( + kind="CoreRepository", + id=message.repository, + branch=message.source_branch, + ) + await repository.checks.fetch() + + for relationship in repository.checks.peers: + log.info("Adding check for user defined check") + check_definition = relationship.peer + events.append( + messages.CheckRepositoryCheckDefinition( + check_definition_id=check_definition.id, + repository_id=repository.id, + repository_name=repository.name.value, + commit=repository.commit.value, + file_path=check_definition.file_path.value, + class_name=check_definition.class_name.value, + branch_name=message.source_branch, + proposed_change=message.proposed_change, + ) + ) + + for event in events: + event.assign_meta(parent=message) + await service.send(message=event) diff --git a/backend/infrahub/message_bus/operations/trigger/__init__.py b/backend/infrahub/message_bus/operations/trigger/__init__.py index b09b918dbf..73101bbf58 100644 --- a/backend/infrahub/message_bus/operations/trigger/__init__.py +++ b/backend/infrahub/message_bus/operations/trigger/__init__.py @@ -1,3 +1,3 @@ -from . import artifact_definition +from . 
import artifact_definition, proposed_change -__all__ = ["artifact_definition"] +__all__ = ["artifact_definition", "proposed_change"] diff --git a/backend/infrahub/message_bus/operations/trigger/proposed_change.py b/backend/infrahub/message_bus/operations/trigger/proposed_change.py new file mode 100644 index 0000000000..d0b931c81b --- /dev/null +++ b/backend/infrahub/message_bus/operations/trigger/proposed_change.py @@ -0,0 +1,24 @@ +from infrahub.core.constants import ProposedChangeState +from infrahub.log import get_logger +from infrahub.message_bus import messages +from infrahub.services import InfrahubServices + +log = get_logger() + + +async def cancel(message: messages.TriggerProposedChangeCancel, service: InfrahubServices) -> None: + proposed_changed_opened = await service.client.filters( + kind="CoreProposedChange", include=["id", "source_branch"], state__value=ProposedChangeState.OPEN.value + ) + proposed_changed_closed = await service.client.filters( + kind="CoreProposedChange", include=["id", "source_branch"], state__value=ProposedChangeState.CLOSED.value + ) + + events = [] + for proposed_change in proposed_changed_opened + proposed_changed_closed: + if proposed_change.source_branch.value == message.branch: + events.append(messages.RequestProposedChangeCancel(proposed_change=proposed_change.id)) + + for event in events: + event.assign_meta(parent=message) + await service.send(message=event) diff --git a/backend/infrahub/message_bus/rpc.py b/backend/infrahub/message_bus/rpc.py index baa6b85dee..3be1b2b843 100644 --- a/backend/infrahub/message_bus/rpc.py +++ b/backend/infrahub/message_bus/rpc.py @@ -1,147 +1,27 @@ from __future__ import annotations import asyncio -import json -from collections import defaultdict from typing import TYPE_CHECKING, List, MutableMapping -from infrahub_sdk import UUIDT +from infrahub.log import get_log_data, get_logger -from infrahub import config -from infrahub.database import InfrahubDatabase, get_db -from infrahub.log import clear_log_context, get_log_data, get_logger -from infrahub.message_bus import messages -from infrahub.message_bus.operations import execute_message -from infrahub.services import InfrahubServices -from infrahub.services.adapters.message_bus.rabbitmq import RabbitMQMessageBus -from infrahub.worker import WORKER_IDENTITY - -from . import InfrahubMessage, InfrahubResponse, Meta, get_broker +from . 
import InfrahubMessage, Meta from .messages import ROUTING_KEY_MAP -from .types import MessageTTL if TYPE_CHECKING: from aio_pika.abc import ( - AbstractChannel, AbstractExchange, - AbstractIncomingMessage, - AbstractQueue, - AbstractRobustConnection, ) log = get_logger() class InfrahubRpcClientBase: - connection: AbstractRobustConnection - channel: AbstractChannel - callback_queue: AbstractQueue - events_queue: AbstractQueue - loop: asyncio.AbstractEventLoop exchange: AbstractExchange - delayed_exchange: AbstractExchange - dlx: AbstractExchange def __init__(self) -> None: self.futures: MutableMapping[str, asyncio.Future] = {} self.loop = asyncio.get_running_loop() - self.service: InfrahubServices = InfrahubServices() - - async def connect(self) -> InfrahubRpcClient: - self.connection = await get_broker() - - if not self.connection: - return self - - self.channel = await self.connection.channel() - self.callback_queue = await self.channel.declare_queue(name=f"api-callback-{WORKER_IDENTITY}", exclusive=True) - self.events_queue = await self.channel.declare_queue(name=f"api-events-{WORKER_IDENTITY}", exclusive=True) - - await self.callback_queue.consume(self.on_response, no_ack=True) - await self.events_queue.consume(self.on_response, no_ack=True) - self.exchange = await self.channel.declare_exchange( - f"{config.SETTINGS.broker.namespace}.events", type="topic", durable=True - ) - self.dlx = await self.channel.declare_exchange( - f"{config.SETTINGS.broker.namespace}.dlx", type="topic", durable=True - ) - - queue = await self.channel.declare_queue( - f"{config.SETTINGS.broker.namespace}.rpcs", durable=True, arguments={"x-queue-type": "quorum"} - ) - - worker_bindings = [ - "check.*.*", - "event.*.*", - "finalize.*.*", - "git.*.*", - "request.*.*", - "transform.*.*", - "trigger.*.*", - ] - self.delayed_exchange = await self.channel.declare_exchange( - f"{config.SETTINGS.broker.namespace}.delayed", type="headers", durable=True - ) - for routing_key in worker_bindings: - await queue.bind(self.exchange, routing_key=routing_key) - await queue.bind(self.dlx, routing_key=routing_key) - - for ttl in MessageTTL.variations(): - ttl_queue = await self.channel.declare_queue( - f"{config.SETTINGS.broker.namespace}.delay.{ttl.name.lower()}_seconds", - durable=True, - arguments={ - "x-dead-letter-exchange": self.dlx.name, - "x-message-ttl": ttl.value, - "x-queue-type": "quorum", - }, - ) - await ttl_queue.bind( - self.delayed_exchange, - arguments={"x-match": "all", "delay": ttl.value}, - ) - - await self.events_queue.bind(self.exchange, routing_key="refresh.registry.*") - - db = InfrahubDatabase(driver=await get_db()) - self.service = InfrahubServices( - database=db, - message_bus=RabbitMQMessageBus( - channel=self.channel, exchange=self.exchange, delayed_exchange=self.delayed_exchange - ), - ) - - return self - - async def on_response(self, message: AbstractIncomingMessage) -> None: - if message.correlation_id: - future: asyncio.Future = self.futures.pop(message.correlation_id) - - if future: - future.set_result(message) - return - - clear_log_context() - if message.routing_key in messages.MESSAGE_MAP: - await execute_message(routing_key=message.routing_key, message_body=message.body, service=self.service) - else: - log.error("Invalid message received", message=f"{message!r}") - - async def rpc(self, message: InfrahubMessage) -> InfrahubResponse: - correlation_id = str(UUIDT()) - - future = self.loop.create_future() - self.futures[correlation_id] = future - - log_data = get_log_data() - request_id = 
log_data.get("request_id", "") - message.meta = Meta(request_id=request_id, correlation_id=correlation_id, reply_to=self.callback_queue.name) - - await self.send(message=message) - - response = await future - data = json.loads(response.body) - return InfrahubResponse(**data) async def send(self, message: InfrahubMessage) -> None: routing_key = ROUTING_KEY_MAP.get(type(message)) @@ -165,18 +45,7 @@ class InfrahubRpcClientTesting(InfrahubRpcClientBase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.responses = defaultdict(list) - self.replies: List[InfrahubResponse] = [] self.sent: List[InfrahubMessage] = [] - async def connect(self) -> InfrahubRpcClient: - return self - - async def rpc(self, message: InfrahubMessage) -> InfrahubResponse: - return self.replies.pop() - - async def add_mock_reply(self, response: InfrahubResponse) -> None: - self.replies.append(response) - async def send(self, message: InfrahubMessage) -> None: self.sent.append(message) diff --git a/backend/infrahub/message_bus/worker.py b/backend/infrahub/message_bus/worker.py deleted file mode 100644 index c27e0abb3a..0000000000 --- a/backend/infrahub/message_bus/worker.py +++ /dev/null @@ -1,18 +0,0 @@ -from aio_pika.abc import AbstractIncomingMessage - -from infrahub.log import clear_log_context, get_logger -from infrahub.message_bus import messages -from infrahub.message_bus.operations import execute_message -from infrahub.services import InfrahubServices - -log = get_logger() - - -class WorkerCallback: - def __init__(self, service: InfrahubServices) -> None: - self.service = service - - async def run_command(self, message: AbstractIncomingMessage) -> None: - clear_log_context() - if message.routing_key in messages.MESSAGE_MAP: - await execute_message(routing_key=message.routing_key, message_body=message.body, service=self.service) diff --git a/backend/infrahub/serve/log.py b/backend/infrahub/serve/log.py index 15f1959ff8..18c4f06a25 100644 --- a/backend/infrahub/serve/log.py +++ b/backend/infrahub/serve/log.py @@ -1,27 +1,29 @@ +from typing import Any + from gunicorn.glogging import Logger from infrahub.log import get_logger class GunicornLogger(Logger): - def __init__(self, cfg): + def __init__(self, cfg: Any): super().__init__(cfg) self.logger = get_logger("gunicorn") - def critical(self, msg, *args, **kwargs): + def critical(self, msg: str, *args: Any, **kwargs: Any) -> None: self.logger.critical(msg, *args, **kwargs) - def error(self, msg, *args, **kwargs): + def error(self, msg: str, *args: Any, **kwargs: Any) -> None: self.logger.error(msg, *args, **kwargs) - def warning(self, msg, *args, **kwargs): + def warning(self, msg: str, *args: Any, **kwargs: Any) -> None: self.logger.warning(msg, *args, **kwargs) - def info(self, msg, *args, **kwargs): + def info(self, msg: str, *args: Any, **kwargs: Any) -> None: self.logger.info(msg, *args, **kwargs) - def debug(self, msg, *args, **kwargs): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: self.logger.debug(msg, *args, **kwargs) - def exception(self, msg, *args, **kwargs): + def exception(self, msg: str, *args: Any, **kwargs: Any) -> None: self.logger.exception(msg, *args, **kwargs) diff --git a/backend/infrahub/server.py b/backend/infrahub/server.py index d2d5b2ab32..bce8b766d3 100644 --- a/backend/infrahub/server.py +++ b/backend/infrahub/server.py @@ -1,37 +1,38 @@ -import asyncio import logging import os import time +from functools import partial from typing import Awaitable, Callable from asgi_correlation_id import 
CorrelationIdMiddleware from asgi_correlation_id.context import correlation_id from fastapi import FastAPI, Request, Response from fastapi.logger import logger -from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates +from infrahub_sdk.timestamp import TimestampFormatError from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor +from pydantic import ValidationError from starlette_exporter import PrometheusMiddleware, handle_metrics -import infrahub.config as config -from infrahub import __version__ +from infrahub import __version__, config from infrahub.api import router as api -from infrahub.api.background import BackgroundRunner +from infrahub.api.exception_handlers import generic_api_exception_handler +from infrahub.components import ComponentType from infrahub.core.initialization import initialization from infrahub.database import InfrahubDatabase, InfrahubDatabaseMode, get_db from infrahub.exceptions import Error -from infrahub.graphql.app import InfrahubGraphQLApp +from infrahub.graphql.api.endpoints import router as graphql_router from infrahub.lock import initialize_lock from infrahub.log import clear_log_context, get_logger, set_log_data from infrahub.message_bus import close_broker_connection, connect_to_broker from infrahub.message_bus.rpc import InfrahubRpcClient from infrahub.middleware import InfrahubCORSMiddleware -from infrahub.services import services +from infrahub.services import InfrahubServices, services +from infrahub.services.adapters.cache.redis import RedisCache +from infrahub.services.adapters.message_bus.rabbitmq import RabbitMQMessageBus from infrahub.trace import add_span_exception, configure_trace, get_traceid, get_tracer -# pylint: disable=too-many-locals - app = FastAPI( title="Infrahub", version=__version__, @@ -78,26 +79,29 @@ async def app_initialization(): ) # Initialize database Driver and load local registry - app.state.db = InfrahubDatabase(mode=InfrahubDatabaseMode.DRIVER, driver=await get_db()) + database = app.state.db = InfrahubDatabase(mode=InfrahubDatabaseMode.DRIVER, driver=await get_db()) initialize_lock() + async with app.state.db.start_session() as db: + await initialization(db=db) + # Initialize connection to the RabbitMQ bus await connect_to_broker() + message_bus = config.OVERRIDE.message_bus or RabbitMQMessageBus() + cache = config.OVERRIDE.cache or RedisCache() + service = InfrahubServices( + cache=cache, database=database, message_bus=message_bus, component_type=ComponentType.API_SERVER + ) + await service.initialize() + # service.message_bus = app.state.rpc_client.rabbitmq + services.prepare(service=service) # Initialize RPC Client - app.state.rpc_client = await InfrahubRpcClient().connect() - services.prepare(service=app.state.rpc_client.service) - - async with app.state.db.start_session() as db: - await initialization(db=db) - - # Initialize the Background Runner - if config.SETTINGS.miscellaneous.start_background_runner: - app.state.runner = BackgroundRunner( - db=app.state.db, database_name=config.SETTINGS.database.database_name, interval=10 - ) - asyncio.create_task(app.state.runner.run()) + rpc_client = InfrahubRpcClient() + rpc_client.exchange = message_bus.rpc_exchange + app.state.rpc_client = rpc_client + app.state.service = service @app.on_event("shutdown") @@ -129,18 +133,18 @@ async def add_process_time_header(request: Request, call_next): return response -app.add_middleware(CorrelationIdMiddleware) - - 
-@app.exception_handler(Error) -async def api_exception_handler_base_infrahub_error(_: Request, exc: Error) -> JSONResponse: - """Generic API Exception handler.""" - - error = exc.api_response() - add_span_exception(exc) - return JSONResponse(status_code=exc.HTTP_CODE, content=error) +@app.middleware("http") +async def add_telemetry_span_exception( + request: Request, call_next: Callable[[Request], Awaitable[Response]] +) -> Response: + try: + return await call_next(request) + except Exception as exc: + add_span_exception(exc) + raise +app.add_middleware(CorrelationIdMiddleware) app.add_middleware( PrometheusMiddleware, app_name="infrahub", @@ -151,16 +155,16 @@ async def api_exception_handler_base_infrahub_error(_: Request, exc: Error) -> J ) app.add_middleware(InfrahubCORSMiddleware) +app.add_exception_handler(Error, generic_api_exception_handler) +app.add_exception_handler(TimestampFormatError, partial(generic_api_exception_handler, http_code=400)) +app.add_exception_handler(ValidationError, partial(generic_api_exception_handler, http_code=400)) + app.add_route(path="/metrics", route=handle_metrics) -app.add_route(path="/graphql", route=InfrahubGraphQLApp(playground=True), methods=["GET", "POST", "OPTIONS"]) -app.add_route( - path="/graphql/{branch_name:path}", route=InfrahubGraphQLApp(playground=True), methods=["GET", "POST", "OPTIONS"] -) -# app.add_websocket_route(path="/graphql", route=InfrahubGraphQLApp()) -# app.add_websocket_route(path="/graphql/{branch_name:str}", route=InfrahubGraphQLApp()) +app.include_router(graphql_router) if os.path.exists(FRONTEND_ASSET_DIRECTORY) and os.path.isdir(FRONTEND_ASSET_DIRECTORY): app.mount("/assets", StaticFiles(directory=FRONTEND_ASSET_DIRECTORY), "assets") + app.mount("/favicons", StaticFiles(directory=FRONTEND_ASSET_DIRECTORY), "favicons") @app.get("/{rest_of_path:path}", include_in_schema=False) diff --git a/backend/infrahub/services/__init__.py b/backend/infrahub/services/__init__.py index 787a942bc3..73c78199fe 100644 --- a/backend/infrahub/services/__init__.py +++ b/backend/infrahub/services/__init__.py @@ -1,15 +1,19 @@ -from typing import Optional +from typing import Awaitable, Callable, Optional from infrahub_sdk import InfrahubClient +from infrahub.components import ComponentType from infrahub.database import InfrahubDatabase from infrahub.exceptions import InitializationError +from infrahub.log import get_logger from infrahub.message_bus import InfrahubMessage, InfrahubResponse, Meta from infrahub.message_bus.messages import ROUTING_KEY_MAP from infrahub.message_bus.types import MessageTTL from .adapters.cache import InfrahubCache from .adapters.message_bus import InfrahubMessageBus +from .protocols import InfrahubLogger +from .scheduler import InfrahubScheduler class InfrahubServices: @@ -19,11 +23,16 @@ def __init__( client: Optional[InfrahubClient] = None, database: Optional[InfrahubDatabase] = None, message_bus: Optional[InfrahubMessageBus] = None, + log: Optional[InfrahubLogger] = None, + component_type: Optional[ComponentType] = None, ): self.cache = cache or InfrahubCache() self._client = client self._database = database self.message_bus = message_bus or InfrahubMessageBus() + self.log = log or get_logger() + self.component_type = component_type or ComponentType.NONE + self.scheduler = InfrahubScheduler() @property def client(self) -> InfrahubClient: @@ -39,6 +48,11 @@ def database(self) -> InfrahubDatabase: return self._database + async def initialize(self) -> None: + """Initialize the Services""" + await 
self.message_bus.initialize(service=self) + await self.scheduler.initialize(service=self) + async def send(self, message: InfrahubMessage, delay: Optional[MessageTTL] = None) -> None: routing_key = ROUTING_KEY_MAP.get(type(message)) if not routing_key: @@ -63,4 +77,7 @@ def prepare(self, service: InfrahubServices) -> None: self.send = self.service.send +ServiceFunction = Callable[[InfrahubServices], Awaitable[None]] + + services = ServiceManager() diff --git a/backend/infrahub/services/adapters/cache/__init__.py b/backend/infrahub/services/adapters/cache/__init__.py index b61e548eb7..5d8c6c7a86 100644 --- a/backend/infrahub/services/adapters/cache/__init__.py +++ b/backend/infrahub/services/adapters/cache/__init__.py @@ -12,6 +12,8 @@ async def list_keys(self, filter_pattern: str) -> List[str]: """Return a list of active keys that match the provided filter.""" raise NotImplementedError() - async def set(self, key: str, value: str, expires: Optional[int] = None) -> bool: + async def set( + self, key: str, value: str, expires: Optional[int] = None, not_exists: bool = False + ) -> Optional[bool]: """Set a value in the cache.""" raise NotImplementedError() diff --git a/backend/infrahub/services/adapters/cache/redis.py b/backend/infrahub/services/adapters/cache/redis.py index ecbb9062cb..4a258dde95 100644 --- a/backend/infrahub/services/adapters/cache/redis.py +++ b/backend/infrahub/services/adapters/cache/redis.py @@ -32,5 +32,7 @@ async def list_keys(self, filter_pattern: str) -> List[str]: return [key.decode() for key in keys] - async def set(self, key: str, value: str, expires: Optional[int] = None) -> bool: - return await self.connection.set(name=key, value=value, ex=expires) + async def set( + self, key: str, value: str, expires: Optional[int] = None, not_exists: bool = False + ) -> Optional[bool]: + return await self.connection.set(name=key, value=value, ex=expires, nx=not_exists) diff --git a/backend/infrahub/services/adapters/message_bus/__init__.py b/backend/infrahub/services/adapters/message_bus/__init__.py index 39bff463ca..bbe414d2d7 100644 --- a/backend/infrahub/services/adapters/message_bus/__init__.py +++ b/backend/infrahub/services/adapters/message_bus/__init__.py @@ -1,12 +1,28 @@ -from typing import Optional +from __future__ import annotations -from infrahub.message_bus import InfrahubMessage -from infrahub.message_bus.types import MessageTTL +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from aio_pika.abc import AbstractExchange + + from infrahub.message_bus import InfrahubMessage, InfrahubResponse + from infrahub.message_bus.types import MessageTTL + from infrahub.services import InfrahubServices class InfrahubMessageBus: + # This exchange attribute should be removed when the InfrahubRpcClient + # class has been removed + rpc_exchange: Optional[AbstractExchange] = None + + async def initialize(self, service: InfrahubServices) -> None: + """Initialize the Message bus""" + async def publish(self, message: InfrahubMessage, routing_key: str, delay: Optional[MessageTTL] = None) -> None: raise NotImplementedError() async def reply(self, message: InfrahubMessage, routing_key: str) -> None: raise NotImplementedError() + + async def rpc(self, message: InfrahubMessage) -> InfrahubResponse: + raise NotImplementedError() diff --git a/backend/infrahub/services/adapters/message_bus/rabbitmq.py b/backend/infrahub/services/adapters/message_bus/rabbitmq.py index f38df95d6f..95187aaa00 100644 --- a/backend/infrahub/services/adapters/message_bus/rabbitmq.py +++ 
b/backend/infrahub/services/adapters/message_bus/rabbitmq.py @@ -1,21 +1,141 @@ -from typing import Optional +from __future__ import annotations -from aio_pika.abc import AbstractChannel, AbstractExchange +import asyncio +import json +from typing import TYPE_CHECKING, MutableMapping, Optional -from infrahub.message_bus import InfrahubMessage +from infrahub_sdk import UUIDT + +from infrahub import config +from infrahub.components import ComponentType +from infrahub.log import clear_log_context, get_log_data +from infrahub.message_bus import InfrahubMessage, InfrahubResponse, Meta, get_broker, messages +from infrahub.message_bus.operations import execute_message from infrahub.message_bus.types import MessageTTL from infrahub.services.adapters.message_bus import InfrahubMessageBus +from infrahub.worker import WORKER_IDENTITY + +if TYPE_CHECKING: + from aio_pika.abc import ( + AbstractChannel, + AbstractExchange, + AbstractIncomingMessage, + AbstractQueue, + AbstractRobustConnection, + ) + + from infrahub.services import InfrahubServices class RabbitMQMessageBus(InfrahubMessageBus): def __init__( - self, channel: AbstractChannel, exchange: AbstractExchange, delayed_exchange: AbstractExchange + self, ) -> None: - self.channel = channel - self.exchange = exchange - self.delayed_exchange = delayed_exchange + self.channel: AbstractChannel + self.exchange: AbstractExchange + self.delayed_exchange: AbstractExchange + self.service: InfrahubServices + self.connection: AbstractRobustConnection + self.callback_queue: AbstractQueue + self.events_queue: AbstractQueue + self.dlx: AbstractExchange + + self.loop = asyncio.get_running_loop() + self.futures: MutableMapping[str, asyncio.Future] = {} + + async def initialize(self, service: InfrahubServices) -> None: + self.service = service + if self.service.component_type == ComponentType.API_SERVER: + await self._initialize_api_server() + elif self.service.component_type == ComponentType.GIT_AGENT: + await self._initialize_git_worker() + + async def on_callback(self, message: AbstractIncomingMessage) -> None: + if message.correlation_id: + future: asyncio.Future = self.futures.pop(message.correlation_id) + + if future: + future.set_result(message) + return + + clear_log_context() + if message.routing_key in messages.MESSAGE_MAP: + await execute_message(routing_key=message.routing_key, message_body=message.body, service=self.service) + else: + self.service.log.error("Invalid message received", message=f"{message!r}") + + async def _initialize_api_server(self) -> None: + self.connection = await get_broker() + self.channel = await self.connection.channel() + self.callback_queue = await self.channel.declare_queue(name=f"api-callback-{WORKER_IDENTITY}", exclusive=True) + self.events_queue = await self.channel.declare_queue(name=f"api-events-{WORKER_IDENTITY}", exclusive=True) + + await self.callback_queue.consume(self.on_callback, no_ack=True) + await self.events_queue.consume(self.on_callback, no_ack=True) + self.exchange = await self.channel.declare_exchange( + f"{config.SETTINGS.broker.namespace}.events", type="topic", durable=True + ) + self.rpc_exchange = self.exchange + self.dlx = await self.channel.declare_exchange( + f"{config.SETTINGS.broker.namespace}.dlx", type="topic", durable=True + ) + + queue = await self.channel.declare_queue( + f"{config.SETTINGS.broker.namespace}.rpcs", + durable=True, + arguments={"x-max-priority": 5}, + ) + + worker_bindings = [ + "check.*.*", + "event.*.*", + "finalize.*.*", + "git.*.*", + "request.*.*", + "transform.*.*", + 
"trigger.*.*", + ] + self.delayed_exchange = await self.channel.declare_exchange( + f"{config.SETTINGS.broker.namespace}.delayed", type="headers", durable=True + ) + for routing_key in worker_bindings: + await queue.bind(self.exchange, routing_key=routing_key) + await queue.bind(self.dlx, routing_key=routing_key) + + for ttl in MessageTTL.variations(): + ttl_queue = await self.channel.declare_queue( + f"{config.SETTINGS.broker.namespace}.delay.{ttl.name.lower()}_seconds", + durable=True, + arguments={ + "x-dead-letter-exchange": self.dlx.name, + "x-message-ttl": ttl.value, + "x-max-priority": 5, + }, + ) + await ttl_queue.bind( + self.delayed_exchange, + arguments={"x-match": "all", "delay": ttl.value}, + ) + + await self.events_queue.bind(self.exchange, routing_key="refresh.registry.*") + + async def _initialize_git_worker(self) -> None: + connection = await get_broker() + # Create a channel and subscribe to the incoming RPC queue + self.channel = await connection.channel() + + events_queue = await self.channel.declare_queue(name=f"worker-events-{WORKER_IDENTITY}", exclusive=True) + + self.exchange = await self.channel.declare_exchange( + f"{config.SETTINGS.broker.namespace}.events", type="topic", durable=True + ) + await events_queue.bind(self.exchange, routing_key="refresh.registry.*") + self.delayed_exchange = await self.channel.get_exchange(name=f"{config.SETTINGS.broker.namespace}.delayed") + + await events_queue.consume(callback=self.on_callback, no_ack=True) async def publish(self, message: InfrahubMessage, routing_key: str, delay: Optional[MessageTTL] = None) -> None: + message.assign_priority(priority=messages.message_priority(routing_key=routing_key)) if delay: message.assign_header(key="delay", value=delay.value) await self.delayed_exchange.publish(message, routing_key=routing_key) @@ -24,3 +144,19 @@ async def publish(self, message: InfrahubMessage, routing_key: str, delay: Optio async def reply(self, message: InfrahubMessage, routing_key: str) -> None: await self.channel.default_exchange.publish(message, routing_key=routing_key) + + async def rpc(self, message: InfrahubMessage) -> InfrahubResponse: + correlation_id = str(UUIDT()) + + future = self.loop.create_future() + self.futures[correlation_id] = future + + log_data = get_log_data() + request_id = log_data.get("request_id", "") + message.meta = Meta(request_id=request_id, correlation_id=correlation_id, reply_to=self.callback_queue.name) + + await self.service.send(message=message) + + response = await future + data = json.loads(response.body) + return InfrahubResponse(**data) diff --git a/backend/infrahub/services/protocols.py b/backend/infrahub/services/protocols.py new file mode 100644 index 0000000000..4bd2a09030 --- /dev/null +++ b/backend/infrahub/services/protocols.py @@ -0,0 +1,21 @@ +from typing import Any, Optional, Protocol + + +class InfrahubLogger(Protocol): + def debug(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send a debug event""" + + def info(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send an info event""" + + def warning(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send a warning event""" + + def error(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send an error event.""" + + def critical(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send a critical event.""" + + def exception(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send an exception event.""" 
diff --git a/backend/infrahub/services/scheduler.py b/backend/infrahub/services/scheduler.py new file mode 100644 index 0000000000..c14d466705 --- /dev/null +++ b/backend/infrahub/services/scheduler.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import asyncio +import random +from dataclasses import dataclass +from typing import TYPE_CHECKING, List + +from infrahub import config +from infrahub.components import ComponentType +from infrahub.tasks.keepalive import refresh_api_server_components +from infrahub.tasks.recurring import resync_repositories, trigger_branch_refresh + +if TYPE_CHECKING: + from infrahub.services import InfrahubServices, ServiceFunction + + +@dataclass +class Schedule: + name: str + interval: int + function: ServiceFunction + start_delay: int = 30 + + +class InfrahubScheduler: + def __init__(self) -> None: + self.service: InfrahubServices + self.running: bool = False + self.schedules: List[Schedule] = [] + + async def initialize(self, service: InfrahubServices) -> None: + self.service = service + + self.running = config.SETTINGS.miscellaneous.start_background_runner + if self.service.component_type == ComponentType.API_SERVER: + # Add some randomness to the interval to avoid having all workers pulling the latest update at the same time + random_number = 30 + random.randint(1, 4) - 2 + + schedules = [ + Schedule(name="refresh_api_components", interval=10, function=refresh_api_server_components), + Schedule(name="resync_repositories", interval=10, function=resync_repositories), + Schedule( + name="branch_refresh", interval=10, function=trigger_branch_refresh, start_delay=random_number + ), + ] + self.schedules.extend(schedules) + await self.start_schedule() + + async def start_schedule(self) -> None: + for schedule in self.schedules: + asyncio.create_task( + run_schedule(schedule=schedule, service=self.service), name=f"scheduled_task_{schedule.name}" + ) + + +async def run_schedule(schedule: Schedule, service: InfrahubServices) -> None: + """Execute the task provided in the schedule as per the defined interval + + Once the service is marked to be shut down, the scheduler will stop executing tasks.
+ """ + for _ in range(schedule.start_delay): + if not service.scheduler.running: + return + await asyncio.sleep(delay=1) + + service.log.info("Started recurring task", task=schedule.name) + + while service.scheduler.running: + try: + await schedule.function(service) + except Exception as exc: # pylint: disable=broad-exception-caught + service.log.error(str(exc)) + for _ in range(schedule.interval): + if not service.scheduler.running: + return + await asyncio.sleep(delay=1) diff --git a/backend/infrahub/storage.py b/backend/infrahub/storage.py new file mode 100644 index 0000000000..9c97a1a2bd --- /dev/null +++ b/backend/infrahub/storage.py @@ -0,0 +1,59 @@ +import tempfile +from typing import Any, BinaryIO + +import botocore.exceptions +import fastapi_storages +from typing_extensions import Self + +from infrahub import config +from infrahub.config import StorageSettings +from infrahub.exceptions import NodeNotFound + + +class InfrahubS3ObjectStorage(fastapi_storages.S3Storage): + def __init__(self, **kwargs: Any) -> None: + for key, value in kwargs.items(): + if hasattr(self, key): + setattr(self, key, value) + super().__init__() + + def open(self, name: str) -> BinaryIO: + f = tempfile.NamedTemporaryFile() # pylint: disable=consider-using-with + self._bucket.download_fileobj(name, f) + f.flush() + f.seek(0) + return f # type: ignore + + +fastapi_storages.InfrahubS3ObjectStorage = InfrahubS3ObjectStorage + + +class InfrahubObjectStorage: + _settings: StorageSettings + _storage: fastapi_storages.base.BaseStorage + + def __init__(self, settings: StorageSettings) -> None: + self._settings = settings + + driver = getattr(fastapi_storages, self._settings.driver.name) + + driver_settings = getattr(self._settings, self._settings.driver.value.lower()) + self._storage = driver(**driver_settings.dict(by_alias=True)) + + @classmethod + async def init(cls, settings: StorageSettings) -> Self: + return cls(settings) + + def store(self, identifier: str, content: bytes) -> None: + with tempfile.NamedTemporaryFile() as f: + f.write(content) + self._storage.write(f, identifier) + + def retrieve(self, identifier: str) -> str: + try: + with self._storage.open(identifier) as f: + return f.read().decode() + except (FileNotFoundError, botocore.exceptions.ClientError): + raise NodeNotFound( # pylint: disable=raise-missing-from + branch_name=config.SETTINGS.main.default_branch, node_type="StorageObject", identifier=identifier + ) diff --git a/backend/infrahub/storage/local.py b/backend/infrahub/storage/local.py deleted file mode 100644 index ac87dda892..0000000000 --- a/backend/infrahub/storage/local.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -from pathlib import Path -from typing import Optional, Union - -from pydantic import BaseSettings - -from infrahub import config -from infrahub.exceptions import NodeNotFound - -from .main import InfrahubObjectStorage - - -class LocalStorageSettings(BaseSettings): - directory: str = "storage" - - class Config: - env_prefix = "INFRAHUB_STORAGE_LOCAL_" - case_sensitive = False - - -class InfrahubLocalStorage(InfrahubObjectStorage): - settings: LocalStorageSettings - - def __init__(self, settings: Optional[Union[dict, LocalStorageSettings]] = None) -> None: - if isinstance(settings, LocalStorageSettings): - self.settings = settings - elif isinstance(settings, dict): - self.settings = LocalStorageSettings(**settings) - else: - self.settings = LocalStorageSettings() - - if not os.path.isdir(self.directory_root): - raise ValueError( - f"A valid directory must be provided for 
InfrahubLocalStorage, instead of {self.directory_root}"
-            )
-
-    async def store(self, identifier: str, content: bytes) -> None:
-        fileh = Path(self.generate_path(identifier=identifier))
-        fileh.write_bytes(data=content)
-
-    async def retrieve(self, identifier: str, encoding: str = "utf-8") -> str:
-        fileh = Path(self.generate_path(identifier=identifier))
-
-        if not fileh.exists():
-            raise NodeNotFound(
-                branch_name=config.SETTINGS.main.default_branch, node_type="StorageObject", identifier=identifier
-            )
-
-        return fileh.read_text(encoding=encoding)
-
-    @property
-    def directory_root(self) -> str:
-        """Return the path to the root directory for the storage."""
-        current_dir = os.getcwd()
-        storage_directory = self.settings.directory
-        if not os.path.isabs(storage_directory):
-            storage_directory = os.path.join(current_dir, self.settings.directory)
-
-        return storage_directory
-
-    def generate_path(self, identifier: str) -> str:
-        return os.path.join(self.directory_root, identifier)
diff --git a/backend/infrahub/storage/main.py b/backend/infrahub/storage/main.py
deleted file mode 100644
index 9c89ed06b9..0000000000
--- a/backend/infrahub/storage/main.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing_extensions import Self
-
-
-class InfrahubObjectStorage:
-    @classmethod
-    async def init(cls, **kwargs) -> Self:
-        return cls(**kwargs)
-
-    async def store(self, identifier: str, content: bytes):
-        raise NotImplementedError
-
-    async def retrieve(self, identifier: str):
-        raise NotImplementedError
diff --git a/backend/infrahub/tasks/keepalive.py b/backend/infrahub/tasks/keepalive.py
new file mode 100644
index 0000000000..de8a2b784d
--- /dev/null
+++ b/backend/infrahub/tasks/keepalive.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from infrahub.core.timestamp import Timestamp
+from infrahub.worker import WORKER_IDENTITY
+
+if TYPE_CHECKING:
+    from infrahub.services import InfrahubServices
+
+
+async def refresh_api_server_components(service: InfrahubServices) -> None:
+    """Update API server worker information in the cache
+
+    The goal of this job is to maintain an updated list of API server workers in the cache. Entries have a freshness
+    of 15 seconds, after which workers that haven't refreshed their entry in the cache are removed.
+
+    The function also keeps the primary API server ID up to date. The primary ID is used within other tasks to ensure
+    that only one worker is responsible for scheduling specific tasks.
+ """ + service.log.debug("Refreshing API workers in cache") + + await service.cache.set(key=f"api_server:{WORKER_IDENTITY}", value=str(Timestamp()), expires=15) + + result = await service.cache.set(key="primary_api_server_id", value=WORKER_IDENTITY, expires=15, not_exists=True) + if not result: + service.log.debug("Primary node already set") + primary_id = await service.cache.get(key="primary_api_server_id") + if primary_id == WORKER_IDENTITY: + service.log.debug("Primary node set but same as ours, refreshing lifetime") + await service.cache.set(key="primary_api_server_id", value=WORKER_IDENTITY, expires=15) diff --git a/backend/infrahub/tasks/recurring.py b/backend/infrahub/tasks/recurring.py new file mode 100644 index 0000000000..2c2837ab97 --- /dev/null +++ b/backend/infrahub/tasks/recurring.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from infrahub.message_bus import messages +from infrahub.worker import WORKER_IDENTITY + +from .registry import refresh_branches + +if TYPE_CHECKING: + from infrahub.services import InfrahubServices + + +async def trigger_branch_refresh(service: InfrahubServices) -> None: + service.log.debug("Running branch refresh task") + async with service.database.start_session() as db: + await refresh_branches(db=db) + + +async def resync_repositories(service: InfrahubServices) -> None: + primary_identity = await service.cache.get("primary_api_server_id") + if primary_identity == WORKER_IDENTITY: + service.log.debug( + f"Primary identity={primary_identity} matches my identity={WORKER_IDENTITY}. Posting sync of repo message." + ) + await service.send(message=messages.RequestGitSync()) diff --git a/backend/infrahub/tasks/registry.py b/backend/infrahub/tasks/registry.py index 2a0981ecf7..f1c464b152 100644 --- a/backend/infrahub/tasks/registry.py +++ b/backend/infrahub/tasks/registry.py @@ -1,11 +1,13 @@ -from typing import List +from typing import TYPE_CHECKING, List from infrahub import lock from infrahub.core import registry -from infrahub.core.branch import Branch from infrahub.database import InfrahubDatabase from infrahub.log import get_logger +if TYPE_CHECKING: + from infrahub.core.branch import Branch + log = get_logger() @@ -17,7 +19,7 @@ async def refresh_branches(db: InfrahubDatabase): """ async with lock.registry.local_schema_lock(): - branches: List[Branch] = await Branch.get_list(db=db) + branches: List[Branch] = await registry.branch_object.get_list(db=db) active_branches = [branch.name for branch in branches] for new_branch in branches: if new_branch.name in registry.branch: diff --git a/backend/infrahub/test_data/dataset01.py b/backend/infrahub/test_data/dataset01.py index 388cf5f8ff..ab35f1e78d 100644 --- a/backend/infrahub/test_data/dataset01.py +++ b/backend/infrahub/test_data/dataset01.py @@ -6,6 +6,8 @@ # pylint: skip-file ROLES = ["spine", "leaf", "firewall", "server", "loopback"] +STATUSES = ["active", "provisionning", "maintenance"] +TAGS = ["blue", "green", "red"] DEVICES = ( ("spine1", "active", "MX480", "profile1", "spine", ["red", "green"]), @@ -89,53 +91,29 @@ async def load_data(db: InfrahubDatabase, nbr_devices: int = None): # ------------------------------------------ # Create Status, Role & DeviceProfile # ------------------------------------------ - statuses_dict = {} - roles_dict = {} + # statuses_dict = {} + # roles_dict = {} LOGGER.info("Creating Site") site_hq = await Node.init(db=db, schema="BuiltinLocation") await site_hq.new(db=db, name="HQ", type="Site") await 
site_hq.save(db=db) - LOGGER.info("Creating Roles & Status") - for role in ROLES: - obj = await Node.init(db=db, schema="BuiltinRole") - await obj.new(db=db, description=role.title(), name=role) - await obj.save(db=db) - roles_dict[role] = obj - LOGGER.info(f"Created Role: {role}") - - STATUSES = ["active", "provisionning", "maintenance"] - for status in STATUSES: - obj = await Node.init(db=db, schema="BuiltinStatus") - await obj.new(db=db, description=status.title(), name=status) - await obj.save(db=db) - statuses_dict[status] = obj - LOGGER.info(f"Created Status: {status}") - - # TAGS = ["blue", "green", "red"] - # for tag in TAGS: - # obj = await Node.init(db=db, schema="Tag") - # await obj.new(db=db, name=tag) - # await obj.save(db=db) - # tags_dict[tag] = obj - # LOGGER.info(f"Created Tag: {tag}") - - active_status = statuses_dict["active"] - role_loopback = roles_dict["loopback"] + active_status = "active" + role_loopback = "loopback" LOGGER.info("Creating Device") for idx, device in enumerate(DEVICES): if nbr_devices and nbr_devices <= idx: continue - status = statuses_dict[device[1]] + status = device[1] - role_id = None + role = None if device[4]: - role_id = roles_dict[device[4]].id + role = device[4] obj = await Node.init(db=db, schema="InfraDevice") - await obj.new(db=db, name=device[0], status=status.id, type=device[2], role=role_id, site=site_hq) + await obj.new(db=db, name=device[0], status=status, type=device[2], role=role, site=site_hq) await obj.save(db=db) LOGGER.info(f"- Created Device: {device[0]}") @@ -148,8 +126,8 @@ async def load_data(db: InfrahubDatabase, nbr_devices: int = None): device="spine1", name="Loopback0", enabled=True, - status=active_status.id, - role=role_loopback.id, + status=active_status, + role=role_loopback, speed=10000, ) await intf.save(db=db) @@ -165,7 +143,7 @@ async def load_data(db: InfrahubDatabase, nbr_devices: int = None): INTERFACES = ["Ethernet0", "Ethernet1", "Ethernet2"] for intf_idx, intf_name in enumerate(INTERFACES): intf_role = INTERFACE_ROLES[device[4]][intf_idx] - intf_role_id = roles_dict[intf_role].id + # intf_role_id = roles_dict[intf_role].id enabled = True if intf_idx in [0, 1]: @@ -178,8 +156,8 @@ async def load_data(db: InfrahubDatabase, nbr_devices: int = None): name=intf_name, speed=10000, enabled=enabled, - status=active_status.id, - role=intf_role_id, + status=active_status, + role=intf_role, ) await intf.save(db=db) diff --git a/backend/infrahub/test_data/dataset03.py b/backend/infrahub/test_data/dataset03.py index 093efc56a4..57e433ca20 100644 --- a/backend/infrahub/test_data/dataset03.py +++ b/backend/infrahub/test_data/dataset03.py @@ -180,24 +180,6 @@ async def load_data(db: InfrahubDatabase): # ------------------------------------------ # Create Status, Role & DeviceProfile # ------------------------------------------ - statuses_dict = {} - roles_dict = {} - - LOGGER.info("Creating Roles, Status & Tag") - for role in DEVICE_ROLES + INTF_ROLES: - obj = await Node.init(db=db, schema="Role") - await obj.new(db=db, description=role.title(), name=role) - await obj.save(db=db) - roles_dict[role] = obj - LOGGER.info(f" Created Role: {role}") - - for status in STATUSES: - obj = await Node.init(db=db, schema="Status") - await obj.new(db=db, description=status.title(), name=status) - await obj.save(db=db) - statuses_dict[status] = obj - LOGGER.info(f" Created Status: {status}") - for tag in TAGS: obj = await Node.init(db=db, schema="Tag") await obj.new(db=db, name=tag) @@ -205,7 +187,7 @@ async def load_data(db: 
InfrahubDatabase): tags_dict[tag] = obj LOGGER.info(f" Created Tag: {tag}") - active_status = statuses_dict["active"] + active_status = "active" internal_as = asn_dict["Duff"] LOGGER.info("Creating Site & Device") @@ -229,8 +211,6 @@ async def load_data(db: InfrahubDatabase): for idx, device in enumerate(DEVICES): device_name = f"{site_name}-{device[0]}" - status_id = statuses_dict[device[1]].id - role_id = roles_dict[device[4]].id device_type = device[2] obj = await Node.init(db=db, schema="Device") @@ -238,9 +218,9 @@ async def load_data(db: InfrahubDatabase): db=db, site=site, name=device_name, - status=status_id, + status=device[1], type=device[2], - role=role_id, + role=device[4], # source=pop_builder_account, asn=asn_dict["Duff"], tags=[tags_dict[tag_name] for tag_name in device[5]], @@ -257,8 +237,8 @@ async def load_data(db: InfrahubDatabase): device=obj.id, name="Loopback0", enabled=True, - status=active_status.id, - role=roles_dict["loopback"].id, + status=active_status, + role="loopback", speed=1000, # source=pop_builder_account, ) @@ -277,8 +257,8 @@ async def load_data(db: InfrahubDatabase): device=obj.id, name=INTERFACE_MGMT_NAME[device_type], enabled=True, - status=active_status.id, - role=roles_dict["management"].id, + status=active_status, + role="management", speed=1000, # source=pop_builder_account, ) @@ -291,7 +271,6 @@ async def load_data(db: InfrahubDatabase): # Other Interfaces for intf_idx, intf_name in enumerate(INTERFACE_NAMES[device_type]): intf_role = INTERFACE_ROLES_MAPPING[device[4]][intf_idx] - intf_role_id = roles_dict[intf_role].id intf = await Node.init(db=db, schema="Interface") await intf.new( @@ -300,8 +279,8 @@ async def load_data(db: InfrahubDatabase): name=intf_name, speed=10000, enabled=True, - status=active_status.id, - role=intf_role_id, + status=active_status, + role=intf_role, # source=pop_builder_account, ) await intf.save(db=db) @@ -356,8 +335,8 @@ async def load_data(db: InfrahubDatabase): vendor_id=f"{provider_name.upper()}-{UUIDT().short()}", provider=provider.id, # type=intf_role.upper(), - status=active_status.id, - role=roles_dict[intf_role].id, + status=active_status, + role=intf_role, ) await circuit.save(db=db) @@ -391,8 +370,8 @@ async def load_data(db: InfrahubDatabase): remote_ip=peer_ip, peer_group=peer_group_dict[peer_group_name], device=device_dict[device_name], - status=active_status.id, - role=roles_dict[intf_role].id, + status=active_status, + role=intf_role, ) await bgp_session.save(db=db) @@ -439,8 +418,8 @@ async def load_data(db: InfrahubDatabase): remote_ip=loopback2, peer_group=peer_group_dict[peer_group_name].id, device=device_dict[device1].id, - status=active_status.id, - role=roles_dict["backbone"], + status=active_status, + role="backbone", ) await obj.save(db=db) @@ -484,7 +463,7 @@ async def load_data(db: InfrahubDatabase): provider=provider, # type="DARK FIBER", status=active_status, - role=roles_dict["backbone"], + role="backbone", ) await obj.save(db=db) diff --git a/backend/infrahub/transforms.py b/backend/infrahub/transforms.py index 5db101314e..236d1d12f8 100644 --- a/backend/infrahub/transforms.py +++ b/backend/infrahub/transforms.py @@ -1,110 +1,11 @@ -import asyncio -import os -from abc import abstractmethod -from typing import Optional +from warnings import warn -from git import Repo -from infrahub_sdk import InfrahubClient +from infrahub_sdk.transforms import INFRAHUB_TRANSFORM_VARIABLE_TO_IMPORT, InfrahubTransform -INFRAHUB_TRANSFORM_VARIABLE_TO_IMPORT = "INFRAHUB_TRANSFORMS" +warn( + f"The module 
{__name__} is deprecated. Update to use infrahub_sdk.transforms instead.", + DeprecationWarning, + stacklevel=2, +) - -class InfrahubTransform: - name: Optional[str] = None - query: str = None - url: str = None - timeout: int = 10 - rebase: bool = True - - def __init__(self, branch=None, root_directory=None, server_url=None): - self.data = None - self.git = None - - self.logs = [] - - self.branch = branch - - self.server_url = server_url or os.environ.get("INFRAHUB_URL", "http://127.0.0.1:8000") - self.root_directory = root_directory or os.getcwd() - - self.client: InfrahubClient = None - - if not self.name: - self.name = self.__class__.__name__ - - if not self.query: - raise ValueError("A query must be provided") - if not self.url: - raise ValueError("A url must be provided") - - @classmethod - async def init(cls, client=None, *args, **kwargs): - """Async init method, If an existing InfrahubClient client hasn't been provided, one will be created automatically.""" - - item = cls(*args, **kwargs) - - if client: - item.client = client - else: - item.client = await InfrahubClient.init(address=item.server_url) - - return item - - # def log_error(self, message, object_id=None, object_type=None): - - # log_message = {"level": "ERROR", "message": message, "branch": self.branch_name} - # if object_id: - # log_message["object_id"] = object_id - # if object_type: - # log_message["object_type"] = object_type - # self.logs.append(log_message) - - # if self.output == "stdout": - # print(json.dumps(log_message)) - - # def log_info(self, message, object_id=None, object_type=None): - - # log_message = {"level": "INFO", "message": message, "branch": self.branch_name} - # if object_id: - # log_message["object_id"] = object_id - # if object_type: - # log_message["object_type"] = object_type - - # self.logs.append(log_message) - - # if self.output == "stdout": - # print(json.dumps(log_message)) - - @property - def branch_name(self) -> str: - """Return the name of the current git branch.""" - - if self.branch: - return self.branch - - if not self.git: - self.git = Repo(self.root_directory) - self.branch = str(self.git.active_branch) - - return self.branch - - @abstractmethod - def transform(self, data: dict): - pass - - async def collect_data(self): - """Query the result of the GraphQL Query defined in sef.query and return the result""" - - return await self.client.query_gql_query(name=self.query, branch_name=self.branch_name, rebase=self.rebase) - - async def run(self, data: dict = None) -> bool: - """Execute the transformation after collecting the data from the GraphQL query. 
- The result of the check is determined based on the presence or not of ERROR log messages.""" - - if not data: - data = await self.collect_data() - - if asyncio.iscoroutinefunction(self.transform): - return await self.transform(data=data) - - return self.transform(data=data) +__all__ = ["INFRAHUB_TRANSFORM_VARIABLE_TO_IMPORT", "InfrahubTransform"] diff --git a/backend/infrahub/types.py b/backend/infrahub/types.py index 118c5cdc7c..f255396f14 100644 --- a/backend/infrahub/types.py +++ b/backend/infrahub/types.py @@ -60,6 +60,7 @@ def get_graphql_filters(cls, name: str, include_properties: bool = True) -> Dict filters: Dict[str, typing.Any] = {} attr_class = cls.get_infrahub_class() filters[f"{name}__value"] = cls.graphql_filter() + filters[f"{name}__values"] = graphene.List(cls.graphql_filter) if not include_properties: return filters @@ -181,6 +182,15 @@ class Color(InfrahubDataType): infrahub = "String" +class Dropdown(InfrahubDataType): + label: str = "Dropdown" + graphql = graphene.String + graphql_query = "DropdownType" + graphql_input = "TextAttributeInput" + graphql_filter = graphene.String + infrahub = "Dropdown" + + class Number(InfrahubDataType): label: str = "Number" graphql = graphene.Int @@ -202,7 +212,7 @@ class Bandwidth(InfrahubDataType): class IPHost(InfrahubDataType): label: str = "IPHost" graphql = graphene.String - graphql_query = "TextAttributeType" + graphql_query = "IPHostType" graphql_input = "TextAttributeInput" graphql_filter = graphene.String infrahub = "IPHost" @@ -211,7 +221,7 @@ class IPHost(InfrahubDataType): class IPNetwork(InfrahubDataType): label: str = "IPNetwork" graphql = graphene.String - graphql_query = "TextAttributeType" + graphql_query = "IPNetworkType" graphql_input = "TextAttributeInput" graphql_filter = graphene.String infrahub = "IPNetwork" @@ -285,6 +295,7 @@ class Boolean(InfrahubDataType): ATTRIBUTE_TYPES: Dict[str, Type[InfrahubDataType]] = { "ID": ID, + "Dropdown": Dropdown, "Text": Text, "TextArea": TextArea, "DateTime": DateTime, diff --git a/backend/infrahub/utils.py b/backend/infrahub/utils.py index 99f263816d..5c4d78329d 100644 --- a/backend/infrahub/utils.py +++ b/backend/infrahub/utils.py @@ -32,6 +32,10 @@ def find_first_file_in_directory(directory: str) -> Optional[str]: return None +def format_label(slug: str) -> str: + return " ".join([word.title() for word in slug.split("_")]) + + class MetaEnum(EnumMeta): def __contains__(cls, item: Any) -> bool: try: diff --git a/backend/infrahub/visuals.py b/backend/infrahub/visuals.py new file mode 100644 index 0000000000..62fd0f4faa --- /dev/null +++ b/backend/infrahub/visuals.py @@ -0,0 +1,34 @@ +from typing import List + +COLOR_SELECTION = [ + "#ed6a5a", + "#f4f1bb", + "#9bc1bc", + "#5ca4a9", + "#e6ebe0", + "#52489c", + "#4062bb", + "#59c3c3", + "#56638a", + "#5d737e", + "#55505c", + "#592941", + "#498467", + "#e5d0e3", + "#748cab", + "#018e42", + "#50808e", + "#7b2d26", + "#9799ca", + "#51bbfe", +] + + +def select_color(existing: List[str]) -> str: + """Select a color from a predefined list without including anything from a list of existing colors.""" + for color in COLOR_SELECTION: + if color not in existing: + existing.append(color) + return color + + return "#ff0000" diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py index c0543c8913..eb73fb4f48 100644 --- a/backend/tests/conftest.py +++ b/backend/tests/conftest.py @@ -10,8 +10,9 @@ from infrahub_sdk.utils import str_to_bool from infrahub import config +from infrahub.components import ComponentType from infrahub.lock 
import initialize_lock -from infrahub.message_bus import InfrahubMessage +from infrahub.message_bus import InfrahubMessage, InfrahubResponse from infrahub.message_bus.operations import execute_message from infrahub.message_bus.types import MessageTTL from infrahub.services import InfrahubServices @@ -40,9 +41,11 @@ def event_loop(): @pytest.fixture(scope="module", autouse=True) -def execute_before_any_test(worker_id): +def execute_before_any_test(worker_id, tmpdir_factory): config.load_and_exit() + config.SETTINGS.storage.driver = config.StorageDriver.FileSystemStorage + if TEST_IN_DOCKER: try: db_id = int(worker_id[2]) + 1 @@ -51,19 +54,37 @@ def execute_before_any_test(worker_id): config.SETTINGS.cache.address = f"{BUILD_NAME}-cache-{db_id}" config.SETTINGS.database.address = f"{BUILD_NAME}-database-{db_id}" - config.SETTINGS.storage.settings = {"directory": "/opt/infrahub/storage"} + config.SETTINGS.storage.local = config.FileSystemStorageSettings(path="/opt/infrahub/storage") + else: + storage_dir = tmpdir_factory.mktemp("storage") + config.SETTINGS.storage.local = config.FileSystemStorageSettings(path=str(storage_dir)) config.SETTINGS.broker.enable = False config.SETTINGS.cache.enable = True config.SETTINGS.miscellaneous.start_background_runner = False config.SETTINGS.security.secret_key = "4e26b3d9-b84f-42c9-a03f-fee3ada3b2fa" config.SETTINGS.main.internal_address = "http://mock" + config.OVERRIDE.message_bus = BusRecorder() initialize_lock() -class BusRecorder(InfrahubMessageBus): +class BusRPCMock(InfrahubMessageBus): def __init__(self): + self.response: List[InfrahubResponse] = [] + + async def publish(self, message: InfrahubMessage, routing_key: str, delay: Optional[MessageTTL] = None) -> None: + pass + + def add_mock_reply(self, response: InfrahubResponse): + self.response.append(response) + + async def rpc(self, message: InfrahubMessage) -> InfrahubResponse: + return self.response.pop() + + +class BusRecorder(InfrahubMessageBus): + def __init__(self, component_type: Optional[ComponentType] = None): self.messages: List[InfrahubMessage] = [] self.messages_per_routing_key: Dict[str, List[InfrahubMessage]] = {} @@ -134,6 +155,10 @@ def get_message_bus_recorder() -> BusRecorder: def get_message_bus_simulator() -> BusSimulator: return BusSimulator() + @staticmethod + def get_message_bus_rpc() -> BusRPCMock: + return BusRPCMock() + @pytest.fixture() def helper() -> TestHelper: diff --git a/backend/tests/fixtures/checks/check01.py b/backend/tests/fixtures/checks/check01.py index 3c52eb9bf8..42c2c7f788 100644 --- a/backend/tests/fixtures/checks/check01.py +++ b/backend/tests/fixtures/checks/check01.py @@ -1,4 +1,4 @@ -from infrahub.checks import InfrahubCheck +from infrahub_sdk.checks import InfrahubCheck class Check01(InfrahubCheck): diff --git a/backend/tests/fixtures/checks/check02.py b/backend/tests/fixtures/checks/check02.py index 70d67e6819..462b27a0eb 100644 --- a/backend/tests/fixtures/checks/check02.py +++ b/backend/tests/fixtures/checks/check02.py @@ -1,4 +1,4 @@ -from infrahub.checks import InfrahubCheck +from infrahub_sdk.checks import InfrahubCheck class Check02(InfrahubCheck): diff --git a/backend/tests/fixtures/infrahub-demo-edge-cff6665.tar.gz b/backend/tests/fixtures/infrahub-demo-edge-cff6665.tar.gz index fc090371b3..ca7020eca7 100644 Binary files a/backend/tests/fixtures/infrahub-demo-edge-cff6665.tar.gz and b/backend/tests/fixtures/infrahub-demo-edge-cff6665.tar.gz differ diff --git a/backend/tests/fixtures/project_02/checks/check_spine_interface_status.py 
b/backend/tests/fixtures/project_02/checks/check_spine_interface_status.py index 0330a8501b..75b2d7e119 100644 --- a/backend/tests/fixtures/project_02/checks/check_spine_interface_status.py +++ b/backend/tests/fixtures/project_02/checks/check_spine_interface_status.py @@ -1,4 +1,4 @@ -from infrahub.checks import InfrahubCheck +from infrahub_sdk.checks import InfrahubCheck class InfrahubCheckSpineNbrInterfaceDisabled(InfrahubCheck): diff --git a/backend/tests/fixtures/repo-main-branch-only.tar.gz b/backend/tests/fixtures/repo-main-branch-only.tar.gz index 814172e5e5..70c93ad27b 100644 Binary files a/backend/tests/fixtures/repo-main-branch-only.tar.gz and b/backend/tests/fixtures/repo-main-branch-only.tar.gz differ diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/.gitignore b/backend/tests/fixtures/repos/infrahub-demo-edge/.gitignore new file mode 100644 index 0000000000..999ed626d9 --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/.gitignore @@ -0,0 +1,4 @@ +.vscode/* +*.pyc +*.tar.gz +.DS_Store diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/.infrahub.yml b/backend/tests/fixtures/repos/infrahub-demo-edge/.infrahub.yml new file mode 100644 index 0000000000..9b06729abc --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/.infrahub.yml @@ -0,0 +1,43 @@ +--- +schemas: + - schemas/demo_edge_fabric.yml + +rfiles: + - name: device_startup + description: "Template to generate startup configuration for network devices" + query: "device_startup_info" + template_path: "templates/device_startup_config.tpl.j2" + + - name: clab_topology + query: "topology_info" + template_path: "topology/topology.tpl.j2" + +artifact_definitions: + - name: "Openconfig Interface for Arista devices" + artifact_name: "openconfig-interfaces" + parameters: + device: "name__value" + content_type: "application/json" + targets: "arista_devices" + transformation: "oc_interfaces" + + - name: "Startup Config for Edge devices" + artifact_name: "startup-config" + parameters: + device: "name__value" + content_type: "text/plain" + targets: "edge_router" + transformation: "device_startup" + +check_definitions: + - name: "backbone_link_redundancy" + file_path: "checks/check_backbone_link_redundancy.py" + class_name: "InfrahubCheckBackboneLinkRedundancy" + +python_transforms: + - name: oc_interfaces + class_name: OCInterfaces + file_path: "transforms/openconfig.py" + - name: oc_bgp + class_name: OCBGPNeighbors + file_path: "transforms/openconfig.py" diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/checks/check_backbone_link_redundancy.gql b/backend/tests/fixtures/repos/infrahub-demo-edge/checks/check_backbone_link_redundancy.gql new file mode 100644 index 0000000000..0f92b2459f --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/checks/check_backbone_link_redundancy.gql @@ -0,0 +1,41 @@ +query check_backbone_link_redundancy { + InfraCircuit(role__value: "backbone") { + edges { + node { + id + circuit_id { + value + } + vendor_id { + value + } + status { + value + } + endpoints { + edges { + node { + site { + node { + id + name { + value + } + } + } + connected_endpoint { + node { + ... 
on InfraInterface {
+                    enabled {
+                      value
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/checks/check_backbone_link_redundancy.py b/backend/tests/fixtures/repos/infrahub-demo-edge/checks/check_backbone_link_redundancy.py
new file mode 100644
index 0000000000..0195780e72
--- /dev/null
+++ b/backend/tests/fixtures/repos/infrahub-demo-edge/checks/check_backbone_link_redundancy.py
@@ -0,0 +1,30 @@
+from collections import defaultdict
+
+from infrahub.checks import InfrahubCheck
+
+
+class InfrahubCheckBackboneLinkRedundancy(InfrahubCheck):
+    query = "check_backbone_link_redundancy"
+
+    def validate(self):
+        site_id_by_name = {}
+
+        backbone_links_per_site = defaultdict(lambda: defaultdict(int))
+
+        # Walk the edges/node layout returned by the check_backbone_link_redundancy query
+        for circuit in self.data["InfraCircuit"]["edges"]:
+            status = circuit["node"]["status"]["value"]
+
+            for endpoint in circuit["node"]["endpoints"]["edges"]:
+                site = endpoint["node"]["site"]["node"]
+                site_name = site["name"]["value"]
+                site_id_by_name[site_name] = site["id"]
+                backbone_links_per_site[site_name]["total"] += 1
+                connected_endpoint = endpoint["node"]["connected_endpoint"]["node"]
+                if connected_endpoint and connected_endpoint["enabled"]["value"] and status == "active":
+                    backbone_links_per_site[site_name]["operational"] += 1
+
+        for site_name, site in backbone_links_per_site.items():
+            if site.get("operational", 0) / site["total"] < 0.6:
+                self.log_error(
+                    message=f"{site_name} has less than 60% of its backbone circuits operational ({site.get('operational', 0)}/{site['total']})",
+                    object_id=site_id_by_name[site_name],
+                    object_type="site",
+                )
diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/schemas/demo_edge_fabric.yml b/backend/tests/fixtures/repos/infrahub-demo-edge/schemas/demo_edge_fabric.yml
new file mode 100644
index 0000000000..72d9a0ed7f
--- /dev/null
+++ b/backend/tests/fixtures/repos/infrahub-demo-edge/schemas/demo_edge_fabric.yml
@@ -0,0 +1,26 @@
+# yaml-language-server: $schema=https://schema.infrahub.app/develop/schema.schema.json
+---
+version: '1.0'
+nodes:
+  - name: EdgeFabric
+    namespace: Demo
+    description: "."
+    label: "EdgeFabric"
+    default_filter: name__value
+    display_labels:
+      - name__value
+    attributes:
+      - name: name
+        kind: Text
+        # unique: true
+      - name: description
+        kind: Text
+        optional: true
+      - name: nbr_racks
+        kind: Number
+    relationships:
+      - name: tags
+        peer: BuiltinTag
+        optional: true
+        cardinality: many
+        kind: Attribute
diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/templates/device_startup_config.tpl.j2 b/backend/tests/fixtures/repos/infrahub-demo-edge/templates/device_startup_config.tpl.j2
new file mode 100644
index 0000000000..9c65d5c6a6
--- /dev/null
+++ b/backend/tests/fixtures/repos/infrahub-demo-edge/templates/device_startup_config.tpl.j2
@@ -0,0 +1,111 @@
+{% set ns = namespace(loopback_intf_name=none, loopback_ip=none, management_intf_name=none, management_ip=none) %}
+{% for intf in data.InfraDevice.edges[0].node.interfaces.edges %}
+{% if intf.node.role.value == "loopback" %}
+{% set ns.loopback_intf_name = intf.node.name.value %}
+{% set ns.loopback_ip = intf.node.ip_addresses.edges[0].node.address.value.split('/')[0] %}
+{% elif intf.node.role.value == "management" %}
+{% set ns.management_intf_name = intf.node.name.value %}
+{% set ns.management_ip = intf.node.ip_addresses.edges[0].node.address.value.split('/')[0] %}
+{% endif %}
+{% endfor %}
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$q4ez.aZgB/G/eeWW$ukvRobb5RtYmUlCcY0atxhwPmA6FPoRjR3AxYFJqNFoCRgJjrohKGrBsbY12n1uRZeCer1L8oejx5aPlrf.op0
+!
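+{# Editorial aside, not part of the original template: namespace() is used at the top
+   because a plain {% set %} made inside a {% for %} loop does not persist after the
+   loop ends in Jinja2. For example, {% set ip = none %}{% for a in addrs %}{% set ip = a %}{% endfor %}
+   leaves ip equal to none, while attributes assigned on ns.* inside the loop survive. #}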
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname {{ data.InfraDevice.edges[0].node.name.value }}
+!
+spanning-tree mode mstp
+!
+management api http-commands
+   no shutdown
+!
+management api gnmi
+   transport grpc default
+!
+management api netconf
+   transport ssh default
+!
+{% for intf in data.InfraDevice.edges[0].node.interfaces.edges %}
+{% if intf.node.name.value != ns.management_intf_name and intf.node.name.value != ns.loopback_intf_name %}
+interface {{ intf.node.name.value }}
+{% if intf.node["description"]["value"] %}
+   description {{ intf.node["description"]["value"] }}
+{% else %}
+   description role: {{ intf.node.role.value }}
+{% endif %}
+{% if not intf.node["enabled"]["value"] %}
+   shutdown
+{% endif %}
+
+{% if intf.node["ip_addresses"] %}
+{% for ip in intf.node["ip_addresses"]["edges"] %}
+   ip address {{ ip.node["address"]["value"] }}
+   no switchport
+{% if intf.node.role.value == "peer" or intf.node.role.value == "backbone" %}
+   ip ospf network point-to-point
+{% endif %}
+{% endfor %}
+{% endif %}
+!
+{% endif %}
+{% endfor %}
+!
+interface {{ ns.management_intf_name }}
+{# The loops below reuse the edges/node layout used above; a flat intf["ip_addresses"]
+   access would not match the structure of the GraphQL response. #}
+{% for intf in data.InfraDevice.edges[0].node.interfaces.edges %}
+{% if intf.node.name.value == ns.management_intf_name %}
+{% for ip in intf.node.ip_addresses.edges %}
+   ip address {{ ip.node.address.value }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+!
+interface {{ ns.loopback_intf_name }}
+{% for intf in data.InfraDevice.edges[0].node.interfaces.edges %}
+{% if intf.node.name.value == ns.loopback_intf_name %}
+{% for ip in intf.node.ip_addresses.edges %}
+   ip address {{ ip.node.address.value }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+!
+ip prefix-list BOGON-Prefixes seq 10 permit 172.16.0.0/12 le 24
+ip prefix-list BOGON-Prefixes seq 20 permit 192.168.0.0/16 le 24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+{% if data.InfraDevice.edges[0].node.asn %}
+router bgp {{ data.InfraDevice.edges[0].node.asn.node.asn.value }}
+   router-id {{ ns.loopback_ip }}
+{% for peer_group in data.InfraBGPPeerGroup.edges %}
+   neighbor {{ peer_group.node.name.value }} peer group
+{% if peer_group.node.local_as %}
+   neighbor {{ peer_group.node.name.value }} local-as {{ peer_group.node.local_as.node.asn.value }}
+{% endif %}
+{% if peer_group.node.remote_as and peer_group.node.remote_as.node %}
+   neighbor {{ peer_group.node.name.value }} remote-as {{ peer_group.node.remote_as.node.asn.value }}
+{% endif %}
+{% endfor %}
+!
+{% endif %}
+!
+router ospf 1
+   router-id {{ ns.loopback_ip }}
+   redistribute connected
+   max-lsa 12000
+   passive-interface Loopback0
+   network 0.0.0.0/0 area 0.0.0.0
+!
+route-map BOGONS permit 10
+   match ip address prefix-list BOGON-Prefixes
+!
+route-map BOGONS deny 20
+!
+end
diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/templates/device_startup_info.gql b/backend/tests/fixtures/repos/infrahub-demo-edge/templates/device_startup_info.gql
new file mode 100644
index 0000000000..d243254867
--- /dev/null
+++ b/backend/tests/fixtures/repos/infrahub-demo-edge/templates/device_startup_info.gql
@@ -0,0 +1,73 @@
+query($device: String!) {
+  InfraDevice(name__value: $device) {
+    edges {
+      node {
+        id
+        name {
+          value
+        }
+        asn {
+          node {
+            asn {
+              value
+            }
+          }
+        }
+        interfaces {
+          edges {
+            node {
+              id
+              name {
+                value
+              }
+              description {
+                value
+              }
+              enabled {
+                value
+              }
+              role {
+                value
+              }
+              ... on InfraInterfaceL3 {
+                ip_addresses {
+                  edges {
+                    node {
+                      address {
+                        value
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+
+      }
+    }
+  }
+  InfraBGPPeerGroup {
+    edges {
+      node {
+        name {
+          value
+        }
+        local_as {
+          node {
+            asn {
+              value
+            }
+          }
+        }
+        remote_as {
+          node {
+            asn {
+              value
+            }
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/conftest.py b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/conftest.py
new file mode 100644
index 0000000000..7cced50646
--- /dev/null
+++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/conftest.py
@@ -0,0 +1,56 @@
+import os
+from pathlib import Path
+from typing import Tuple
+
+import pytest
+import ujson
+from infrahub_sdk import InfrahubClientSync
+
+
+class TestHelper:
+    """TestHelper provides functions that can be used as a fixture throughout the test framework"""
+
+    @staticmethod
+    def fixture_file(file_name: str) -> dict:
+        """Return the contents of a fixture file as a dictionary"""
+        file_content = Path(os.path.join(TestHelper.get_fixtures_dir(), file_name)).read_text()
+
+        return ujson.loads(file_content)
+
+    @staticmethod
+    def fixture_files(directory_name: str) -> Tuple[dict, dict]:
+        """Return the contents of the data and response fixture files as dictionaries"""
+
+        data_file = TestHelper.fixture_file(os.path.join(directory_name, "data.json"))
+
+        if "data" in data_file:
+            data_file = data_file["data"]
+
+        response_file = TestHelper.fixture_file(os.path.join(directory_name, "response.json"))
+
+        return (data_file, response_file)
+
+    @staticmethod
+    def get_fixtures_dir():
+        """Get the directory which stores fixtures that are common to multiple unit/integration tests."""
+        here = os.path.abspath(os.path.dirname(__file__))
+        fixtures_dir = os.path.join(here, "fixtures")
+
+        return os.path.abspath(fixtures_dir)
+
+
+@pytest.fixture()
+def root_directory() -> str:
+    here = os.path.abspath(os.path.dirname(__file__))
+    root_dir = os.path.join(here, "../")
+    return os.path.abspath(root_dir)
+
+
+@pytest.fixture()
+def helper() -> TestHelper:
+    return TestHelper()
+
+
+@pytest.fixture()
+def client_sync() -> InfrahubClientSync:
+    return InfrahubClientSync.init(address="http://localhost:8000", insert_tracker=True)
diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_bgp_neighbors/test01/data.json b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_bgp_neighbors/test01/data.json
new file mode 100644
index 0000000000..e5d470b133
--- /dev/null
+++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_bgp_neighbors/test01/data.json
@@ -0,0 +1,481 @@
+{
+  "data": {
+    "bgp_session": {
+      "edges": [
+        {
+          "node": {
+            "id": "d0d47169-b1f3-4255-ab86-c06b39ea8d84",
+            "peer_group": {
+              "node": {
+                "name": {
+                  "value": "TRANSIT_TELIA"
+                }
+              }
+            },
+            "local_ip": {
+              "node": {
+                "address": {
+                  "value": "203.0.113.9/29"
+                }
+              }
+            },
+            "remote_ip": {
+              "node": {
+                "address": {
+                  "value": "203.0.113.10/29"
+                }
+              }
+            },
+            "local_as": {
+              "node": {
+                "asn": {
+                  "value": 64496
+                }
+              }
+            },
+            "remote_as": {
+              "node": {
+                "asn": {
+                  "value": 1299
+                }
+              }
+            },
+            "description": {
+              "value": null
+            }
+          }
+        },
+        {
+          "node": {
+            "id": "d2e49bff-ddf2-484b-a4f5-f1ac076a2a2e",
+            "peer_group": {
+              "node": {
+                "name": {
+                  "value": "TRANSIT_DEFAULT"
+                }
+              }
+            },
+            "local_ip": {
+              "node": {
+                "address": {
+                  "value": "203.0.113.49/29"
+                }
+              }
+            },
+            "remote_ip": {
+              "node": {
+                "address": {
+                  "value": "203.0.113.50/29"
+                }
+              }
+            },
+            "local_as": {
+              "node": {
+                "asn": {
+
"value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 8220 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "066cb468-3229-4595-a4a4-a88b3bef082c", + "peer_group": { + "node": { + "name": { + "value": "POP_INTERNAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.7/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "4ece88b2-665c-4929-ac3e-96430f91974a", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.3/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "4b566879-b47d-452a-9b5a-2e2cee9190b7", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.8/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "f315994a-0caa-4677-b65d-d9dda58b06f3", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.4/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "1815f356-9f87-41c8-b817-42ecf6501628", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.5/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "8c921dee-fe45-48c7-a940-ef10a2384a19", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.1/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "69a3dc11-5440-403c-8013-40d6d0ebad0a", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.6/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + 
"description": { + "value": null + } + } + }, + { + "node": { + "id": "3c82dabf-977b-4fce-b9a1-2f5f3be52a18", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.9/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + }, + { + "node": { + "id": "207ea49c-6af7-4a92-9a99-3b38d73d0487", + "peer_group": { + "node": { + "name": { + "value": "POP_GLOBAL" + } + } + }, + "local_ip": { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + }, + "remote_ip": { + "node": { + "address": { + "value": "10.0.0.10/32" + } + } + }, + "local_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "remote_as": { + "node": { + "asn": { + "value": 64496 + } + } + }, + "description": { + "value": null + } + } + } + ] + } + } + } \ No newline at end of file diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_bgp_neighbors/test01/response.json b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_bgp_neighbors/test01/response.json new file mode 100644 index 0000000000..53ddae5fbc --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_bgp_neighbors/test01/response.json @@ -0,0 +1,105 @@ +{ + "openconfig-bgp:neighbors": { + "neighbor": [ + { + "neighbor-address": "203.0.113.10", + "config": { + "neighbor-address": "203.0.113.10", + "peer-group": "TRANSIT_TELIA", + "peer-as": 1299, + "local-as": 64496 + } + }, + { + "neighbor-address": "203.0.113.50", + "config": { + "neighbor-address": "203.0.113.50", + "peer-group": "TRANSIT_DEFAULT", + "peer-as": 8220, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.7", + "config": { + "neighbor-address": "10.0.0.7", + "peer-group": "POP_INTERNAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.3", + "config": { + "neighbor-address": "10.0.0.3", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.8", + "config": { + "neighbor-address": "10.0.0.8", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.4", + "config": { + "neighbor-address": "10.0.0.4", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.5", + "config": { + "neighbor-address": "10.0.0.5", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.1", + "config": { + "neighbor-address": "10.0.0.1", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.6", + "config": { + "neighbor-address": "10.0.0.6", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.9", + "config": { + "neighbor-address": "10.0.0.9", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + }, + { + "neighbor-address": "10.0.0.10", + "config": { + "neighbor-address": "10.0.0.10", + "peer-group": "POP_GLOBAL", + "peer-as": 64496, + "local-as": 64496 + } + } + ] + } +} \ No newline at end of file diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_interfaces/test01/data.json 
b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_interfaces/test01/data.json new file mode 100644 index 0000000000..563f822403 --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_interfaces/test01/data.json @@ -0,0 +1,275 @@ +{ + "data": { + "device": { + "edges": [ + { + "node": { + "id": "67bbcb4f-5ee7-4c9b-a598-58985ff3dbd4", + "interfaces": { + "edges": [ + { + "node": { + "name": { + "value": "Ethernet2" + }, + "description": { + "value": "Connected to ord1-edge2 Ethernet2" + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet3" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet12" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + } + } + }, + { + "node": { + "name": { + "value": "Ethernet4" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet6" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [ + { + "node": { + "address": { + "value": "203.0.113.49/29" + } + } + } + ] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet10" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet1" + }, + "description": { + "value": "Connected to ord1-edge2 Ethernet1" + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet11" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + } + } + }, + { + "node": { + "name": { + "value": "Ethernet9" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [ + { + "node": { + "address": { + "value": "203.0.113.81/29" + } + } + } + ] + } + } + }, + { + "node": { + "name": { + "value": "Loopback0" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [ + { + "node": { + "address": { + "value": "10.0.0.2/32" + } + } + } + ] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet5" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [ + { + "node": { + "address": { + "value": "203.0.113.9/29" + } + } + } + ] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet7" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + }, + { + "node": { + "name": { + "value": "Management0" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [ + { + "node": { + "address": { + "value": "172.20.20.18/24" + } + } + } + ] + } + } + }, + { + "node": { + "name": { + "value": "Ethernet8" + }, + "description": { + "value": null + }, + "enabled": { + "value": true + }, + "ip_addresses": { + "edges": [] + } + } + } + ] + } + } + } + ] + } + } + } \ No newline at end of file diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_interfaces/test01/response.json b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_interfaces/test01/response.json new 
file mode 100644 index 0000000000..9dc5318f1e --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/fixtures/oc_interfaces/test01/response.json @@ -0,0 +1,226 @@ +{ + "openconfig-interfaces:interface": [ + { + "name": "Ethernet2", + "config": { + "enabled": true, + "description": "Connected to ord1-edge2 Ethernet2" + }, + "subinterfaces": { + "subinterface": [] + } + }, + { + "name": "Ethernet3", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [] + } + }, + { + "name": "Ethernet12", + "config": { + "enabled": true + } + }, + { + "name": "Ethernet4", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [] + } + }, + { + "name": "Ethernet6", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [ + { + "index": 0, + "openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "203.0.113.49", + "config": { + "ip": "203.0.113.49", + "prefix-length": "29" + } + } + ] + }, + "config": { + "enabled": true + } + } + } + ] + } + }, + { + "name": "Ethernet10", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [] + } + }, + { + "name": "Ethernet1", + "config": { + "enabled": true, + "description": "Connected to ord1-edge2 Ethernet1" + }, + "subinterfaces": { + "subinterface": [] + } + }, + { + "name": "Ethernet11", + "config": { + "enabled": true + } + }, + { + "name": "Ethernet9", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [ + { + "index": 0, + "openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "203.0.113.81", + "config": { + "ip": "203.0.113.81", + "prefix-length": "29" + } + } + ] + }, + "config": { + "enabled": true + } + } + } + ] + } + }, + { + "name": "Loopback0", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [ + { + "index": 0, + "openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "10.0.0.2", + "config": { + "ip": "10.0.0.2", + "prefix-length": "32" + } + } + ] + }, + "config": { + "enabled": true + } + } + } + ] + } + }, + { + "name": "Ethernet5", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [ + { + "index": 0, + "openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "203.0.113.9", + "config": { + "ip": "203.0.113.9", + "prefix-length": "29" + } + } + ] + }, + "config": { + "enabled": true + } + } + } + ] + } + }, + { + "name": "Ethernet7", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [] + } + }, + { + "name": "Management0", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [ + { + "index": 0, + "openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "172.20.20.18", + "config": { + "ip": "172.20.20.18", + "prefix-length": "24" + } + } + ] + }, + "config": { + "enabled": true + } + } + } + ] + } + }, + { + "name": "Ethernet8", + "config": { + "enabled": true + }, + "subinterfaces": { + "subinterface": [] + } + } + ] +} \ No newline at end of file diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/integration/graphql/test_graphql_query.py b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/integration/graphql/test_graphql_query.py new file mode 100644 index 0000000000..ff6f68e5d2 --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/integration/graphql/test_graphql_query.py @@ -0,0 +1,20 @@ +import pytest +from infrahub_ctl.utils import find_graphql_query +from infrahub_sdk import InfrahubClientSync + + 
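+# Editorial sketch, not part of the original fixture: the parametrized test below can
+# be reproduced by hand for a single query, assuming a local Infrahub instance:
+#
+#   client = InfrahubClientSync.init(address="http://localhost:8000")
+#   query_str = find_graphql_query(name="topology_info", directory=".")
+#   client.execute_graphql(query=query_str, variables={}, raise_for_error=False)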
+@pytest.mark.parametrize( + "query_name,variables", + [ + ("device_startup_info", {"device": "ord1-edge1"}), + ("oc_interfaces", {"device": "ord1-edge1"}), + ("oc_bgp_neighbors", {"device": "ord1-edge1"}), + ("topology_info", {}), + ("check_backbone_link_redundancy", {}), + ], +) +def test_graphql_queries(root_directory, client_sync: InfrahubClientSync, query_name: str, variables: dict): + query_str = find_graphql_query(name=query_name, directory=root_directory) + response = client_sync.execute_graphql(query=query_str, variables=variables, raise_for_error=False) + + assert "errors" not in response diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/integration/transforms/test_openconfig_integration.py b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/integration/transforms/test_openconfig_integration.py new file mode 100644 index 0000000000..4c899848c3 --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/integration/transforms/test_openconfig_integration.py @@ -0,0 +1,24 @@ +from infrahub_ctl.utils import find_graphql_query +from transforms.openconfig import OCBGPNeighbors, OCInterfaces + + +async def test_oc_interfaces_standard(helper, root_directory, client_sync): + transform = OCInterfaces() + query = find_graphql_query(name=transform.query, directory=root_directory) + data = client_sync.execute_graphql(query=query, variables={"device": "ord1-edge1"}) + + response = await transform.transform(data=data) + assert "openconfig-interfaces:interface" in response + assert len(response["openconfig-interfaces:interface"]) > 2 + + +async def test_oc_bgp_neighbors_standard(helper, root_directory, client_sync): + transform = OCBGPNeighbors() + query = find_graphql_query(name=transform.query, directory=root_directory) + data = client_sync.execute_graphql(query=query, variables={"device": "ord1-edge1"}) + + response = await transform.transform(data=data) + + assert "openconfig-bgp:neighbors" in response + assert "neighbor" in response["openconfig-bgp:neighbors"] + assert len(response["openconfig-bgp:neighbors"]["neighbor"]) > 2 diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/tests/unit/transforms/test_openconfig_unit.py b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/unit/transforms/test_openconfig_unit.py new file mode 100644 index 0000000000..d11bf8178e --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/tests/unit/transforms/test_openconfig_unit.py @@ -0,0 +1,13 @@ +from transforms.openconfig import OCBGPNeighbors, OCInterfaces + + +async def test_oc_interfaces_standard(helper): + data, response = helper.fixture_files(directory_name="oc_interfaces/test01") + transform = OCInterfaces() + assert await transform.transform(data=data) == response + + +async def test_oc_bgp_neighbors_standard(helper): + data, response = helper.fixture_files(directory_name="oc_bgp_neighbors/test01") + transform = OCBGPNeighbors() + assert await transform.transform(data=data) == response diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/topology/topology_info.gql b/backend/tests/fixtures/repos/infrahub-demo-edge/topology/topology_info.gql new file mode 100644 index 0000000000..ef7f001ae5 --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/topology/topology_info.gql @@ -0,0 +1,32 @@ +query { + InfraDevice { + edges { + node { + name { + value + } + interfaces { + edges { + node { + id + role { + value + } + ... 
on InfraInterfaceL3 { + ip_addresses { + edges { + node { + address { + value + } + } + } + } + } + } + } + } + } + } + } +} diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/oc_bgp_neighbors.gql b/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/oc_bgp_neighbors.gql new file mode 100644 index 0000000000..8ffd303eda --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/oc_bgp_neighbors.gql @@ -0,0 +1,47 @@ +query oc_bgp_neighbors ($device: String!) { + InfraBGPSession(device__name__value: $device) { + edges { + node { + id + peer_group { + node { + name { + value + } + } + } + local_ip { + node { + address { + value + } + } + } + remote_ip { + node { + address { + value + } + } + } + local_as { + node { + asn { + value + } + } + } + remote_as { + node { + asn { + value + } + } + } + description { + value + } + } + } + } +} diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/oc_interfaces.gql b/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/oc_interfaces.gql new file mode 100644 index 0000000000..3f91bbf8af --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/oc_interfaces.gql @@ -0,0 +1,35 @@ +query oc_interfaces ($device: String!) { + InfraDevice(name__value: $device) { + edges { + node { + id + interfaces { + edges { + node { + name { + value + } + description { + value + } + enabled { + value + } + ... on InfraInterfaceL3 { + ip_addresses { + edges { + node { + address { + value + } + } + } + } + } + } + } + } + } + } + } +} diff --git a/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/openconfig.py b/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/openconfig.py new file mode 100644 index 0000000000..0deab47dc5 --- /dev/null +++ b/backend/tests/fixtures/repos/infrahub-demo-edge/transforms/openconfig.py @@ -0,0 +1,80 @@ +from infrahub.transforms import InfrahubTransform + + +class OCInterfaces(InfrahubTransform): + query = "oc_interfaces" + url = "openconfig/interfaces" + + async def transform(self, data): + response_payload = {} + response_payload["openconfig-interfaces:interface"] = [] + + for intf in data["InfraDevice"]["edges"][0]["node"]["interfaces"]["edges"]: + intf_name = intf["node"]["name"]["value"] + + intf_config = { + "name": intf_name, + "config": {"enabled": intf["node"]["enabled"]["value"]}, + } + + if intf["node"].get("description", None) and intf["node"]["description"]["value"]: + intf_config["config"]["description"] = intf["node"]["description"]["value"] + + if intf["node"].get("ip_addresses", None): + intf_config["subinterfaces"] = {"subinterface": []} + + for idx, ip in enumerate(intf["node"]["ip_addresses"]["edges"]): + address, mask = ip["node"]["address"]["value"].split("/") + intf_config["subinterfaces"]["subinterface"].append( + { + "index": idx, + "openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": address, + "config": { + "ip": address, + "prefix-length": mask, + }, + } + ] + }, + "config": {"enabled": True}, + }, + } + ) + + response_payload["openconfig-interfaces:interface"].append(intf_config) + + return response_payload + + +class OCBGPNeighbors(InfrahubTransform): + query = "oc_bgp_neighbors" + url = "openconfig/network-instances/network-instance/protocols/protocol/bgp/neighbors" + + async def transform(self, data): + response_payload = {} + + response_payload["openconfig-bgp:neighbors"] = {"neighbor": []} + + for session in data["InfraBGPSession"]["edges"]: + neighbor_address = 
session["node"]["remote_ip"]["node"]["address"]["value"].split("/")[0] + session_data = { + "neighbor-address": neighbor_address, + "config": {"neighbor-address": neighbor_address}, + } + + if session["node"]["peer_group"]: + session_data["config"]["peer-group"] = session["node"]["peer_group"]["node"]["name"]["value"] + + if session["node"]["remote_as"]: + session_data["config"]["peer-as"] = session["node"]["remote_as"]["node"]["asn"]["value"] + + if session["node"]["local_as"]: + session_data["config"]["local-as"] = session["node"]["local_as"]["node"]["asn"]["value"] + + response_payload["openconfig-bgp:neighbors"]["neighbor"].append(session_data) + + return response_payload diff --git a/backend/tests/fixtures/repos/test_base/unit/transforms/multiplier.py b/backend/tests/fixtures/repos/test_base/unit/transforms/multiplier.py index 2f0b73e806..7ac210ea67 100644 --- a/backend/tests/fixtures/repos/test_base/unit/transforms/multiplier.py +++ b/backend/tests/fixtures/repos/test_base/unit/transforms/multiplier.py @@ -1,6 +1,6 @@ from typing import Any, Dict -from infrahub.transforms import InfrahubTransform +from infrahub_sdk.transforms import InfrahubTransform class Multiplier(InfrahubTransform): diff --git a/backend/tests/fixtures/schemas/infra_simple_01.json b/backend/tests/fixtures/schemas/infra_simple_01.json index 2311097de5..0f394a2e51 100644 --- a/backend/tests/fixtures/schemas/infra_simple_01.json +++ b/backend/tests/fixtures/schemas/infra_simple_01.json @@ -9,7 +9,9 @@ "attributes": [ {"name": "name", "kind": "Text", "unique": true}, {"name": "description", "kind": "Text", "optional": true, "order_weight": 900}, - {"name": "type", "kind": "Text"} + {"name": "type", "kind": "Text"}, + {"name": "role", "kind": "Dropdown", "choices": [{"name": "router"}, {"name": "firewall"}]}, + {"name": "status", "kind": "Dropdown", "choices": [{"name": "active"}, {"name": "planned"}]} ], "relationships": [ { @@ -34,8 +36,6 @@ {"name": "enabled", "kind": "Boolean", "default_value": true} ], "relationships": [ - {"name": "status", "peer": "BuiltinStatus", "optional": false, "cardinality": "one", "kind": "Attribute"}, - {"name": "role", "peer": "BuiltinRole", "optional": false, "cardinality": "one", "kind": "Attribute"}, {"name": "device", "peer": "InfraDevice", "optional": false, "cardinality": "one", "kind": "Parent"}, {"name": "tags", "peer": "BuiltinTag", "optional": true, "cardinality": "many", "kind": "Attribute"}, { diff --git a/backend/tests/fixtures/schemas/infra_w_generics_01.json b/backend/tests/fixtures/schemas/infra_w_generics_01.json index 5622f5050f..13bd15ffd7 100644 --- a/backend/tests/fixtures/schemas/infra_w_generics_01.json +++ b/backend/tests/fixtures/schemas/infra_w_generics_01.json @@ -9,11 +9,11 @@ {"name": "name", "kind": "Text"}, {"name": "description", "kind": "Text", "optional": true}, {"name": "speed", "kind": "Number"}, - {"name": "enabled", "kind": "Boolean", "default_value": true} + {"name": "enabled", "kind": "Boolean", "default_value": true}, + {"name": "role", "kind": "Dropdown", "choices": [{"name": "router"}, {"name": "firewall"}]}, + {"name": "status", "kind": "Dropdown", "choices": [{"name": "active"}, {"name": "planned"}]} ], "relationships": [ - {"name": "status", "peer": "BuiltinStatus", "optional": false, "cardinality": "one", "kind": "Attribute"}, - {"name": "role", "peer": "BuiltinRole", "optional": false, "cardinality": "one", "kind": "Attribute"}, {"name": "device", "peer": "InfraDevice", "optional": false, "cardinality": "one", "kind": "Parent"}, {"name": 
"tags", "peer": "BuiltinTag", "optional": true, "cardinality": "many", "kind": "Attribute"} ] diff --git a/backend/tests/fixtures/schemas/not_valid_simple_05.json b/backend/tests/fixtures/schemas/not_valid_simple_05.json index 2e29b072a1..fad0c6fbb6 100644 --- a/backend/tests/fixtures/schemas/not_valid_simple_05.json +++ b/backend/tests/fixtures/schemas/not_valid_simple_05.json @@ -2,8 +2,8 @@ "version": "1.0", "nodes": [ { - "name": "class", - "kind": "Class", + "name": "None", + "namespace": "Test", "default_filter": "name__value", "display_labels": [ "name__value" diff --git a/backend/tests/fixtures/transforms/transform01.py b/backend/tests/fixtures/transforms/transform01.py index 369e99c060..4b2ee61a59 100644 --- a/backend/tests/fixtures/transforms/transform01.py +++ b/backend/tests/fixtures/transforms/transform01.py @@ -1,4 +1,4 @@ -from infrahub.transforms import InfrahubTransform +from infrahub_sdk.transforms import InfrahubTransform class Transform01(InfrahubTransform): diff --git a/backend/tests/fixtures/transforms/transform02.py b/backend/tests/fixtures/transforms/transform02.py index 2f8c571b1e..d88e7d0c59 100644 --- a/backend/tests/fixtures/transforms/transform02.py +++ b/backend/tests/fixtures/transforms/transform02.py @@ -1,9 +1,8 @@ -from infrahub.transforms import InfrahubTransform +from infrahub_sdk.transforms import InfrahubTransform class Transform02(InfrahubTransform): query = "my_query" - # url = "transform01" def transform(self, data: dict): return {str(key).upper(): value for key, value in data.items()} diff --git a/backend/tests/integration/git/conftest.py b/backend/tests/integration/git/conftest.py index 91ee404b4b..2984ffa89f 100644 --- a/backend/tests/integration/git/conftest.py +++ b/backend/tests/integration/git/conftest.py @@ -1,8 +1,9 @@ import os -import tarfile +import shutil from typing import Dict import pytest +from git.repo import Repo import infrahub.config as config @@ -33,11 +34,12 @@ def git_upstream_repo_10(helper, git_sources_dir) -> Dict[str, str]: name = "infrahub-demo-edge" fixtures_dir = helper.get_fixtures_dir() - fixture_repo = os.path.join(fixtures_dir, "infrahub-demo-edge-cff6665.tar.gz") - # Extract the fixture package in the source directory - file = tarfile.open(fixture_repo) - file.extractall(git_sources_dir) - file.close() + test_base = os.path.join(fixtures_dir, f"repos/{name}") + shutil.copytree(test_base, f"{git_sources_dir}/{name}") + origin = Repo.init(f"{git_sources_dir}/{name}", initial_branch="main") + for untracked in origin.untracked_files: + origin.index.add(untracked) + origin.index.commit("First commit") return dict(name=name, path=str(os.path.join(git_sources_dir, name))) diff --git a/backend/tests/integration/git/test_git_repository.py b/backend/tests/integration/git/test_git_repository.py index 5dd704d13b..ab34244643 100644 --- a/backend/tests/integration/git/test_git_repository.py +++ b/backend/tests/integration/git/test_git_repository.py @@ -115,7 +115,9 @@ async def repo(self, test_client, client, db: InfrahubDatabase, git_upstream_rep async def test_import_schema_files(self, db: InfrahubDatabase, client: InfrahubClient, repo: InfrahubRepository): commit = repo.get_commit_value(branch_name="main") - await repo.import_schema_files(branch_name="main", commit=commit) + config_file = await repo.get_repository_config(branch_name="main", commit=commit) + assert config_file + await repo.import_schema_files(branch_name="main", commit=commit, config_file=config_file) assert await client.schema.get(kind="DemoEdgeFabric", 
refresh=True) @@ -160,7 +162,10 @@ async def test_import_all_python_files( self, db: InfrahubDatabase, client: InfrahubClient, repo: InfrahubRepository, query_99 ): commit = repo.get_commit_value(branch_name="main") - await repo.import_all_python_files(branch_name="main", commit=commit) + config_file = await repo.get_repository_config(branch_name="main", commit=commit) + assert config_file + + await repo.import_all_python_files(branch_name="main", commit=commit, config_file=config_file) check_definitions = await client.all(kind="CoreCheckDefinition") assert len(check_definitions) >= 1 @@ -170,7 +175,7 @@ async def test_import_all_python_files( # Validate if the function is idempotent, another import just after the first one shouldn't change anything nbr_relationships_before = await count_relationships(db=db) - await repo.import_all_python_files(branch_name="main", commit=commit) + await repo.import_all_python_files(branch_name="main", commit=commit, config_file=config_file) assert await count_relationships(db=db) == nbr_relationships_before # 1. Modify an object to validate if its being properly updated @@ -213,7 +218,7 @@ async def test_import_all_python_files( ) await obj2.save(db=db) - await repo.import_all_python_files(branch_name="main", commit=commit) + await repo.import_all_python_files(branch_name="main", commit=commit, config_file=config_file) modified_check0 = await client.get(kind="CoreCheckDefinition", id=check_definitions[0].id) assert modified_check0.timeout.value == check_timeout_value_before_change @@ -236,14 +241,16 @@ async def test_import_all_yaml_files( self, db: InfrahubDatabase, client: InfrahubClient, repo: InfrahubRepository, query_99 ): commit = repo.get_commit_value(branch_name="main") - await repo.import_all_yaml_files(branch_name="main", commit=commit, exclude=["artifact_definitions"]) + config_file = await repo.get_repository_config(branch_name="main", commit=commit) + assert config_file + await repo.import_rfiles(branch_name="main", commit=commit, config_file=config_file) rfiles = await client.all(kind="CoreRFile") assert len(rfiles) == 2 # Validate if the function is idempotent, another import just after the first one shouldn't change anything nbr_relationships_before = await count_relationships(db=db) - await repo.import_all_yaml_files(branch_name="main", commit=commit, exclude=["artifact_definitions"]) + await repo.import_rfiles(branch_name="main", commit=commit, config_file=config_file) assert await count_relationships(db=db) == nbr_relationships_before # 1. 
Modify an object to validate if its being properly updated @@ -264,7 +271,7 @@ async def test_import_all_yaml_files( ) await obj.save(db=db) - await repo.import_all_yaml_files(branch_name="main", commit=commit, exclude=["artifact_definitions"]) + await repo.import_rfiles(branch_name="main", commit=commit, config_file=config_file) modified_rfile = await client.get(kind="CoreRFile", id=rfiles[0].id) assert modified_rfile.template_path.value == rfile_template_path_value_before_change diff --git a/backend/tests/integration/user_workflows/test_user_worflow.py b/backend/tests/integration/user_workflows/test_user_worflow.py index f545702550..44084ed444 100644 --- a/backend/tests/integration/user_workflows/test_user_worflow.py +++ b/backend/tests/integration/user_workflows/test_user_worflow.py @@ -115,9 +115,9 @@ device: { id: $device }, name: { value: $intf_name }, description: { value: $description }, - role: { id: $role }, + role: { value: $role }, speed: { value: $speed }, - status: { id: $status } + status: { value: $status } }) { ok diff --git a/backend/tests/unit/api/conftest.py b/backend/tests/unit/api/conftest.py index dca86afa0e..d698627c09 100644 --- a/backend/tests/unit/api/conftest.py +++ b/backend/tests/unit/api/conftest.py @@ -4,6 +4,7 @@ import pytest from fastapi.testclient import TestClient +from infrahub import config from infrahub.core.initialization import create_branch from infrahub.core.manager import NodeManager from infrahub.core.node import Node @@ -28,6 +29,15 @@ def admin_headers(): return {"X-INFRAHUB-KEY": "admin-security"} +@pytest.fixture +def rpc_bus(helper): + original = config.OVERRIDE.message_bus + bus = helper.get_message_bus_rpc() + config.OVERRIDE.message_bus = bus + yield bus + config.OVERRIDE.message_bus = original + + @pytest.fixture async def car_person_data( db: InfrahubDatabase, register_core_models_schema, car_person_schema, first_account diff --git a/backend/tests/unit/api/diff/__init__.py b/backend/tests/unit/api/diff/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/backend/tests/unit/api/diff/test_diff_query_validation.py b/backend/tests/unit/api/diff/test_diff_query_validation.py new file mode 100644 index 0000000000..14c44c4428 --- /dev/null +++ b/backend/tests/unit/api/diff/test_diff_query_validation.py @@ -0,0 +1,42 @@ +import pytest +from pydantic import ValidationError + +from infrahub.api.diff.validation_models import DiffQueryValidated +from infrahub.core.branch import Branch + + +class TestDiffQueryValidation: + def setup_method(self): + self.branch = Branch(name="abc") + self.time_start_str = "2023-06-11" + self.time_end_str = "2023-06-13" + + def test_valid_query(self): + query = DiffQueryValidated( + branch=self.branch, time_from=self.time_start_str, time_to=self.time_end_str, branch_only=True + ) + + assert query.branch == self.branch + assert query.time_from == self.time_start_str + assert query.time_to == self.time_end_str + assert query.branch_only is True + + def test_invalid_time_from(self): + with pytest.raises(ValidationError): + DiffQueryValidated(branch=self.branch, time_from="notatime") + + def test_invalid_time_to(self): + with pytest.raises(ValidationError): + DiffQueryValidated(branch=self.branch, time_to="notatime") + + def test_invalid_time_range(self): + with pytest.raises(ValidationError, match="time_from and time_to are not a valid time range"): + DiffQueryValidated( + branch=self.branch, time_from=self.time_end_str, time_to=self.time_start_str, branch_only=True + ) + + def 
test_time_from_required_for_default_branch(self): + self.branch.is_default = True + + with pytest.raises(ValidationError, match="time_from is mandatory when diffing on the default branch `abc`."): + DiffQueryValidated(branch=self.branch, branch_only=True) diff --git a/backend/tests/unit/api/test_10_transformation_api.py b/backend/tests/unit/api/test_10_transformation_api.py index 22f8350da4..cb333cc11e 100644 --- a/backend/tests/unit/api/test_10_transformation_api.py +++ b/backend/tests/unit/api/test_10_transformation_api.py @@ -16,7 +16,7 @@ def patch_rpc_client(): async def test_transform_endpoint( - db: InfrahubDatabase, client_headers, default_branch, patch_rpc_client, register_core_models_schema, car_person_data + db: InfrahubDatabase, client_headers, default_branch, rpc_bus, register_core_models_schema, car_person_data ): from infrahub.server import app @@ -44,7 +44,7 @@ async def test_transform_endpoint( response_class="transform_response", response_data={"transformed_data": {"KEY1": "value1", "KEY2": "value2"}}, ) - await client.app.state.rpc_client.add_mock_reply(response=mock_response) + rpc_bus.add_mock_reply(response=mock_response) response = client.get( "/api/transform/mytransform", @@ -58,9 +58,7 @@ async def test_transform_endpoint( assert result == {"KEY1": "value1", "KEY2": "value2"} -async def test_transform_endpoint_path( - db: InfrahubDatabase, client_headers, patch_rpc_client, default_branch, car_person_data -): +async def test_transform_endpoint_path(db: InfrahubDatabase, client_headers, rpc_bus, default_branch, car_person_data): from infrahub.server import app client = TestClient(app) @@ -86,7 +84,7 @@ async def test_transform_endpoint_path( response_class="transform_response", response_data={"transformed_data": {"KEY1": "value1", "KEY2": "value2"}}, ) - await client.app.state.rpc_client.add_mock_reply(response=mock_response) + rpc_bus.add_mock_reply(response=mock_response) response = client.get( "/api/transform/my/transform/function", @@ -101,7 +99,7 @@ async def test_transform_endpoint_path( async def test_rfile_endpoint( - db: InfrahubDatabase, client_headers, default_branch, patch_rpc_client, register_core_models_schema, car_person_data + db: InfrahubDatabase, client_headers, default_branch, rpc_bus, register_core_models_schema, car_person_data ): from infrahub.server import app @@ -126,7 +124,7 @@ async def test_rfile_endpoint( response_class="template_response", response_data={"rendered_template": "Rendered by a mocked agent"}, ) - await client.app.state.rpc_client.add_mock_reply(response=mock_response) + rpc_bus.add_mock_reply(response=mock_response) response = client.get( "/api/rfile/test-rfile", diff --git a/backend/tests/unit/api/test_11_artifact.py b/backend/tests/unit/api/test_11_artifact.py index 4fa6f29f67..7a53a60acd 100644 --- a/backend/tests/unit/api/test_11_artifact.py +++ b/backend/tests/unit/api/test_11_artifact.py @@ -14,12 +14,14 @@ def patch_rpc_client(): infrahub.message_bus.rpc.InfrahubRpcClient = InfrahubRpcClientTesting +@pytest.mark.xfail(reason="FIXME: #1627, working in standalone but failing when it's part of the testsuite") async def test_artifact_definition_endpoint( db: InfrahubDatabase, admin_headers, default_branch, patch_rpc_client, register_core_models_schema, + register_builtin_models_schema, car_person_data_generic, authentication_base, ): diff --git a/backend/tests/unit/api/test_12_file.py b/backend/tests/unit/api/test_12_file.py index 194be592ad..7e7d790623 100644 --- a/backend/tests/unit/api/test_12_file.py +++ 
b/backend/tests/unit/api/test_12_file.py @@ -1,24 +1,15 @@ -import pytest from fastapi.testclient import TestClient from infrahub.core.node import Node from infrahub.database import InfrahubDatabase from infrahub.message_bus import InfrahubResponse -from infrahub.message_bus.rpc import InfrahubRpcClientTesting - - -@pytest.fixture -def patch_rpc_client(): - import infrahub.message_bus.rpc - - infrahub.message_bus.rpc.InfrahubRpcClient = InfrahubRpcClientTesting async def test_get_file( db: InfrahubDatabase, client_headers, default_branch, - patch_rpc_client, + rpc_bus, register_core_models_schema, ): from infrahub.server import app @@ -43,7 +34,7 @@ async def test_get_file( assert response.status_code == 400 # With Manual Commit - await client.app.state.rpc_client.add_mock_reply(response=mock_response) + rpc_bus.add_mock_reply(response=mock_response) response = client.get( f"/api/file/{r1.id}/myfile.text?commit=12345678iuytrewqwertyu", @@ -57,7 +48,7 @@ async def test_get_file( r1.commit.value = "1345754212345678iuytrewqwertyu" await r1.save(db=db) - await client.app.state.rpc_client.add_mock_reply(response=mock_response) + rpc_bus.add_mock_reply(response=mock_response) response = client.get( f"/api/file/{r1.id}/myfile.text", @@ -68,7 +59,7 @@ async def test_get_file( assert response.text == "file content" # Access Repo by name - await client.app.state.rpc_client.add_mock_reply(response=mock_response) + rpc_bus.add_mock_reply(response=mock_response) response = client.get( "/api/file/repo01/myfile.text", diff --git a/backend/tests/unit/api/test_15_diff.py b/backend/tests/unit/api/test_15_diff.py index 0a9604797f..32c0af60bb 100644 --- a/backend/tests/unit/api/test_15_diff.py +++ b/backend/tests/unit/api/test_15_diff.py @@ -1,7 +1,7 @@ import pytest from deepdiff import DeepDiff -from infrahub.api.diff import get_display_labels, get_display_labels_per_kind +from infrahub.api.diff.diff import get_display_labels, get_display_labels_per_kind from infrahub.core.initialization import create_branch from infrahub.core.manager import NodeManager from infrahub.core.node import Node diff --git a/backend/tests/unit/api/test_40_schema_api.py b/backend/tests/unit/api/test_40_schema_api.py index ad217a80d6..dddeca79f1 100644 --- a/backend/tests/unit/api/test_40_schema_api.py +++ b/backend/tests/unit/api/test_40_schema_api.py @@ -82,6 +82,73 @@ async def test_schema_read_endpoint_wrong_branch( assert response.json() is not None +async def test_schema_summary_default_branch( + db: InfrahubDatabase, + client, + client_headers, + default_branch: Branch, + car_person_schema_generics: SchemaRoot, + car_person_data_generic, +): + with client: + response = client.get( + "/api/schema/summary", + headers=client_headers, + ) + + assert response.status_code == 200 + assert response.json() is not None + + schema = response.json() + + assert "nodes" in schema + assert "generics" in schema + assert isinstance(schema["nodes"]["BuiltinTag"], str) + + +async def test_schema_kind_default_branch( + db: InfrahubDatabase, + client, + client_headers, + default_branch: Branch, + car_person_schema_generics: SchemaRoot, + car_person_data_generic, +): + with client: + response = client.get( + "/api/schema/BuiltinTag", + headers=client_headers, + ) + + assert response.status_code == 200 + assert response.json() is not None + + schema = response.json() + + assert "id" in schema + assert "hash" in schema + assert "filters" in schema + assert "relationships" in schema + + +async def test_schema_kind_not_valid( + db: InfrahubDatabase, + 
client, + client_headers, + default_branch: Branch, + car_person_schema_generics: SchemaRoot, + car_person_data_generic, +): + with client: + response = client.get( + "/api/schema/NotPresent", + headers=client_headers, + ) + + assert response.status_code == 422 + assert response.json()["errors"][0]["message"] == "Unable to find the schema 'NotPresent' in the registry" + + async def test_schema_load_endpoint_valid_simple( db: InfrahubDatabase, client: TestClient, @@ -110,7 +177,7 @@ async def test_schema_load_endpoint_valid_simple( assert attributes["description"] == 900 assert attributes["type"] == 3000 assert relationships["interfaces"] == 450 - assert relationships["tags"] == 5000 + assert relationships["tags"] == 7000 async def test_schema_load_restricted_namespace( @@ -162,7 +229,7 @@ async def test_schema_load_endpoint_idempotent_simple( assert attributes["description"] == 900 assert attributes["type"] == 3000 assert relationships["interfaces"] == 450 - assert relationships["tags"] == 5000 + assert relationships["tags"] == 7000 creation = client.post( "/api/schema/load", headers=admin_headers, json={"schemas": [helper.schema_file("infra_simple_01.json")]} @@ -335,7 +402,7 @@ async def test_schema_load_endpoint_not_valid_simple_05( ) assert response.status_code == 422 - assert response.json()["detail"][0]["msg"] == "Name can not be set to a reserved keyword 'class' is not allowed." + assert response.json()["detail"][0]["msg"] == "Name can not be set to a reserved keyword 'None' is not allowed." async def test_schema_load_endpoint_not_valid_with_generics_02( diff --git a/backend/tests/unit/api/test_api_exception_handler.py b/backend/tests/unit/api/test_api_exception_handler.py new file mode 100644 index 0000000000..bdb366dd83 --- /dev/null +++ b/backend/tests/unit/api/test_api_exception_handler.py @@ -0,0 +1,79 @@ +from json import loads +from typing import Optional + +from pydantic import BaseModel, ValidationError, root_validator, validator + +from infrahub.api.exception_handlers import generic_api_exception_handler +from infrahub.exceptions import Error + + +class ModelForTesting(BaseModel): + field_1: Optional[str] + + @validator("field_1", always=True) + def always_fail(cls, *args, **kwargs): + raise ValueError("this is the error message") + + @root_validator() + def always_fail_2(cls, values): + raise ValueError("another error message") + + +class MockError(Error): + HTTP_CODE = 418 + DESCRIPTION = "the teapot error" + + def __init__(self, message: Optional[str]): + self.message = message + + +class TestAPIExceptionHandler: + def setup_method(self): + self.error_message = "this is the error message" + + async def test_plain_exception_error(self): + exception = ValueError(self.error_message) + + error_response = await generic_api_exception_handler(None, exception) + + error_dict = loads(error_response.body.decode()) + assert error_dict["errors"] == [{"message": self.error_message, "extensions": {"code": 500}}] + + async def test_pydantic_validation_error(self): + error_message_2 = "another error message" + exception = None + try: + ModelForTesting(field_1="abc") + except ValidationError as exc: + exception = exc + + error_response = await generic_api_exception_handler(None, exception, http_code=400) + + error_dict = loads(error_response.body.decode()) + assert {"message": self.error_message, "extensions": {"code": 400}} in error_dict["errors"] + assert {"message": error_message_2, "extensions": {"code": 400}} in error_dict["errors"] + assert len(error_dict) == 2 + + async def 
test_infrahub_api_error(self): + exception = MockError(self.error_message) + + error_response = await generic_api_exception_handler(None, exception) + + error_dict = loads(error_response.body.decode()) + assert error_dict["errors"] == [{"message": self.error_message, "extensions": {"code": 418}}] + + async def test_infrahub_api_error_default_message(self): + exception = MockError(None) + + error_response = await generic_api_exception_handler(None, exception) + + error_dict = loads(error_response.body.decode()) + assert error_dict["errors"] == [{"message": "the teapot error", "extensions": {"code": 418}}] + + async def test_infrahub_api_error_code_override(self): + exception = MockError(None) + + error_response = await generic_api_exception_handler(None, exception, http_code=500) + + error_dict = loads(error_response.body.decode()) + assert error_dict["errors"] == [{"message": "the teapot error", "extensions": {"code": 418}}] diff --git a/backend/tests/unit/conftest.py b/backend/tests/unit/conftest.py index ec3e249e71..9066a0db84 100644 --- a/backend/tests/unit/conftest.py +++ b/backend/tests/unit/conftest.py @@ -131,14 +131,27 @@ async def git_fixture_repo(git_sources_dir, git_repos_dir, helper) -> InfrahubRe @pytest.fixture def local_storage_dir(tmp_path) -> str: storage_dir = os.path.join(str(tmp_path), "storage") - os.mkdir(storage_dir) - config.SETTINGS.storage.settings = {"directory": storage_dir} + config.SETTINGS.storage.driver = config.StorageDriver.FileSystemStorage + config.SETTINGS.storage.local = config.FileSystemStorageSettings(path=storage_dir) return storage_dir +@pytest.fixture +def s3_storage_bucket() -> str: + bucket_name = "mocked" + config.SETTINGS.storage.driver = config.StorageDriver.InfrahubS3ObjectStorage + config.SETTINGS.storage.s3 = config.S3StorageSettings( + AWS_S3_BUCKET_NAME=bucket_name, + AWS_ACCESS_KEY_ID="some_id", + AWS_SECRET_ACCESS_KEY="secret_key", + AWS_S3_ENDPOINT_URL="storage.googleapis.com", + ) + return config.SETTINGS.storage.s3.endpoint_url + + @pytest.fixture def file1_in_storage(local_storage_dir, helper) -> str: fixture_dir = helper.get_fixtures_dir() @@ -978,7 +991,9 @@ async def base_dataset_03(db: InfrahubDatabase, default_branch: Branch, person_t @pytest.fixture -async def base_dataset_04(db: InfrahubDatabase, default_branch: Branch, register_core_models_schema) -> dict: +async def base_dataset_04( + db: InfrahubDatabase, default_branch: Branch, register_core_models_schema, register_organization_schema +) -> dict: time0 = pendulum.now(tz="UTC") params = { "main_branch": "main", @@ -1133,7 +1148,13 @@ async def car_person_schema(db: InfrahubDatabase, default_branch: Branch, node_g {"name": "is_electric", "kind": "Boolean"}, ], "relationships": [ - {"name": "owner", "peer": "TestPerson", "optional": False, "cardinality": "one"}, + { + "name": "owner", + "peer": "TestPerson", + "optional": False, + "cardinality": "one", + "direction": "outbound", + }, ], }, { @@ -1146,7 +1167,60 @@ async def car_person_schema(db: InfrahubDatabase, default_branch: Branch, node_g {"name": "name", "kind": "Text", "unique": True}, {"name": "height", "kind": "Number", "optional": True}, ], - "relationships": [{"name": "cars", "peer": "TestCar", "cardinality": "many"}], + "relationships": [{"name": "cars", "peer": "TestCar", "cardinality": "many", "direction": "inbound"}], + }, + ], + } + + schema = SchemaRoot(**SCHEMA) + registry.schema.register_schema(schema=schema, branch=default_branch.name) + + +@pytest.fixture +async def choices_schema(db: InfrahubDatabase, 
default_branch: Branch, node_group_schema) -> None: + SCHEMA = { + "generics": [ + { + "name": "Choice", + "namespace": "Base", + "default_filter": "name__value", + "display_labels": ["name__value", "color__value"], + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "color", "kind": "Text", "enum": ["red", "green", "blue"], "optional": True}, + {"name": "measuring_system", "kind": "Text", "enum": ["metric"], "optional": True}, + {"name": "description", "kind": "Text", "optional": True}, + { + "name": "section", + "kind": "Dropdown", + "optional": True, + "choices": [ + {"name": "backend", "label": "Backend", "color": ""}, + {"name": "frontend", "label": "Frontend", "color": "#0000ff"}, + ], + }, + ], + }, + ], + "nodes": [ + { + "name": "Choice", + "namespace": "Test", + "default_filter": "name__value", + "display_labels": ["name__value", "color__value"], + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "status", "kind": "Text", "enum": ["active", "passive"]}, + {"name": "comment", "kind": "Text", "optional": True}, + { + "name": "temperature_scale", + "kind": "Dropdown", + "optional": True, + "choices": [{"name": "celsius", "label": "Celsius"}], + }, + ], + "inherit_from": ["BaseChoice"], }, ], } @@ -1484,12 +1558,13 @@ async def person_tag_schema(db: InfrahubDatabase, default_branch: Branch, data_s {"name": "lastname", "kind": "Text"}, ], "relationships": [ - {"name": "tags", "peer": "BuiltinTag", "cardinality": "many"}, + {"name": "tags", "peer": "BuiltinTag", "cardinality": "many", "direction": "inbound"}, { "name": "primary_tag", "peer": "BuiltinTag", "identifier": "person_primary_tag", "cardinality": "one", + "direction": "outbound", }, ], }, @@ -1762,6 +1837,15 @@ async def criticality_schema(db: InfrahubDatabase, default_branch: Branch, group {"name": "json_no_default", "kind": "JSON", "optional": True}, {"name": "json_default", "kind": "JSON", "default_value": {"value": "bob"}}, {"name": "description", "kind": "Text", "optional": True}, + { + "name": "status", + "kind": "Dropdown", + "optional": True, + "choices": [ + {"name": "active", "color": "#00ff00", "description": "Online things"}, + {"name": "passive", "label": "Redundancy nodes not in the active path"}, + ], + }, ], } @@ -2124,6 +2208,142 @@ async def register_core_models_schema(default_branch: Branch, register_internal_ return schema_branch +@pytest.fixture +async def organization_schema() -> SchemaRoot: + SCHEMA = { + "nodes": [ + { + "name": "Organization", + "namespace": "Core", + "description": "An organization represent a legal entity, a company.", + "include_in_menu": True, + "label": "Organization", + "icon": "mdi:domain", + "default_filter": "name__value", + "order_by": ["name__value"], + "display_labels": ["label__value"], + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "label", "kind": "Text", "optional": True}, + {"name": "description", "kind": "Text", "optional": True}, + ], + "relationships": [ + { + "name": "tags", + "peer": "BuiltinTag", + "kind": "Attribute", + "optional": True, + "cardinality": "many", + }, + ], + }, + ] + } + + return SchemaRoot(**SCHEMA) + + +@pytest.fixture +async def builtin_schema() -> SchemaRoot: + SCHEMA = { + "nodes": [ + { + "name": "Status", + "namespace": "Builtin", + "description": "Represent the status of an object: active, maintenance", + "include_in_menu": True, + "icon": "mdi:list-status", + "label": 
"Status", + "default_filter": "name__value", + "order_by": ["name__value"], + "display_labels": ["label__value"], + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "label", "kind": "Text", "optional": True}, + {"name": "description", "kind": "Text", "optional": True}, + ], + }, + { + "name": "Role", + "namespace": "Builtin", + "description": "Represent the role of an object", + "include_in_menu": True, + "icon": "mdi:ballot", + "label": "Role", + "default_filter": "name__value", + "order_by": ["name__value"], + "display_labels": ["label__value"], + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "label", "kind": "Text", "optional": True}, + {"name": "description", "kind": "Text", "optional": True}, + ], + }, + { + "name": "Location", + "namespace": "Builtin", + "description": "A location represent a physical element: a building, a site, a city", + "include_in_menu": True, + "icon": "mdi:map-marker-radius-outline", + "label": "Location", + "default_filter": "name__value", + "order_by": ["name__value"], + "display_labels": ["name__value"], + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "description", "kind": "Text", "optional": True}, + {"name": "type", "kind": "Text"}, + ], + "relationships": [ + { + "name": "tags", + "peer": "BuiltinTag", + "kind": "Attribute", + "optional": True, + "cardinality": "many", + }, + ], + }, + { + "name": "Criticality", + "namespace": "Builtin", + "description": "Level of criticality expressed from 1 to 10.", + "include_in_menu": True, + "icon": "mdi:alert-octagon-outline", + "label": "Criticality", + "default_filter": "name__value", + "order_by": ["name__value"], + "display_labels": ["name__value"], + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "level", "kind": "Number", "enum": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}, + {"name": "description", "kind": "Text", "optional": True}, + ], + }, + ] + } + + return SchemaRoot(**SCHEMA) + + +@pytest.fixture +async def register_builtin_models_schema(default_branch: Branch, builtin_schema: SchemaRoot) -> SchemaBranch: + schema_branch = registry.schema.register_schema(schema=builtin_schema, branch=default_branch.name) + default_branch.update_schema_hash() + return schema_branch + + +@pytest.fixture +async def register_organization_schema(default_branch: Branch, organization_schema: SchemaRoot) -> SchemaBranch: + schema_branch = registry.schema.register_schema(schema=organization_schema, branch=default_branch.name) + default_branch.update_schema_hash() + return schema_branch + + @pytest.fixture async def register_core_schema_db(db: InfrahubDatabase, default_branch: Branch, register_core_models_schema) -> None: await registry.schema.load_schema_to_db(schema=register_core_models_schema, branch=default_branch, db=db) @@ -2167,6 +2387,8 @@ async def authentication_base( default_branch: Branch, create_test_admin, register_core_models_schema, + register_builtin_models_schema, + register_organization_schema, ): pass diff --git a/backend/tests/unit/core/test_attribute.py b/backend/tests/unit/core/test_attribute.py index 2a1ea6d996..fa6e70e6b5 100644 --- a/backend/tests/unit/core/test_attribute.py +++ b/backend/tests/unit/core/test_attribute.py @@ -1,7 +1,7 @@ import pytest from infrahub_sdk import UUIDT -from infrahub.core.attribute import Integer, IPHost, IPNetwork, String +from 
infrahub.core.attribute import Dropdown, Integer, IPHost, IPNetwork, String from infrahub.core.branch import Branch from infrahub.core.manager import NodeManager from infrahub.core.node import Node @@ -84,6 +84,94 @@ async def test_validate_format_ipnetwork_and_iphost( ) +async def test_validate_iphost_returns(db: InfrahubDatabase, default_branch: Branch, criticality_schema: NodeSchema): + schema = criticality_schema.get_attribute("name") + + test_ipv4 = IPHost( + name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="192.0.2.1/31" + ) + test_ipv6 = IPHost( + name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="2001:db8::/32" + ) + + assert test_ipv4.value == "192.0.2.1/31" + assert test_ipv4.ip == "192.0.2.1" + assert test_ipv4.hostmask == "0.0.0.1" + assert test_ipv4.netmask == "255.255.255.254" + assert test_ipv4.network == "192.0.2.0/31" + assert test_ipv4.prefixlen == "31" + assert test_ipv4.with_hostmask == "192.0.2.1/0.0.0.1" + assert test_ipv4.with_netmask == "192.0.2.1/255.255.255.254" + assert test_ipv4.version == 4 + + assert test_ipv6.value == "2001:db8::/32" + assert test_ipv6.ip == "2001:db8::" + assert test_ipv6.hostmask == "::ffff:ffff:ffff:ffff:ffff:ffff" + assert test_ipv6.netmask == "ffff:ffff::" + assert test_ipv6.network == "2001:db8::/32" + assert test_ipv6.prefixlen == "32" + assert test_ipv6.with_hostmask == "2001:db8::/::ffff:ffff:ffff:ffff:ffff:ffff" + assert test_ipv6.with_netmask == "2001:db8::/ffff:ffff::" + assert test_ipv6.version == 6 + + +async def test_validate_ipnetwork_returns(db: InfrahubDatabase, default_branch: Branch, criticality_schema: NodeSchema): + schema = criticality_schema.get_attribute("name") + + test_ipv4 = IPNetwork( + name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="192.0.2.0/31" + ) + test_ipv6 = IPNetwork( + name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="2001:db8::/32" + ) + + assert test_ipv4.value == "192.0.2.0/31" + assert test_ipv4.broadcast_address == "192.0.2.1" + assert test_ipv4.hostmask == "0.0.0.1" + assert test_ipv4.netmask == "255.255.255.254" + assert test_ipv4.prefixlen == "31" + assert test_ipv4.num_addresses == 2 + assert test_ipv4.with_hostmask == "192.0.2.0/0.0.0.1" + assert test_ipv4.with_netmask == "192.0.2.0/255.255.255.254" + assert test_ipv4.version == 4 + + assert test_ipv6.value == "2001:db8::/32" + assert test_ipv6.broadcast_address == "2001:db8:ffff:ffff:ffff:ffff:ffff:ffff" + assert test_ipv6.hostmask == "::ffff:ffff:ffff:ffff:ffff:ffff" + assert test_ipv6.netmask == "ffff:ffff::" + assert test_ipv6.prefixlen == "32" + assert test_ipv6.num_addresses == 79228162514264337593543950336 + assert test_ipv6.with_hostmask == "2001:db8::/::ffff:ffff:ffff:ffff:ffff:ffff" + assert test_ipv6.with_netmask == "2001:db8::/ffff:ffff::" + assert test_ipv6.version == 6 + + +async def test_validate_content_dropdown(db: InfrahubDatabase, default_branch: Branch, criticality_schema: NodeSchema): + schema = criticality_schema.get_attribute("status") + Dropdown(name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="active") + + with pytest.raises(ValidationError) as exc: + Dropdown(name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="invalid-choice") + assert "invalid-choice must be one of" in str(exc.value) + + +async def test_dropdown_properties(db: InfrahubDatabase, default_branch: Branch, criticality_schema: NodeSchema): + schema = 
criticality_schema.get_attribute("status") + active = Dropdown(name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="active") + passive = Dropdown(name="test", schema=schema, branch=default_branch, at=Timestamp(), node=None, data="passive") + + assert active.value == "active" + assert active.description == "Online things" + assert active.label == "Active" + # The color of the active choice is hardcoded within criticality_schema + assert active.color == "#00ff00" + assert passive.value == "passive" + assert passive.description == "" + assert passive.label == "Redundancy nodes not in the active path" + # The color of the passive choice comes from the color selector in infrahub.visuals + assert passive.color == "#ed6a5a" + + async def test_validate_format_string(db: InfrahubDatabase, default_branch: Branch, criticality_schema: NodeSchema): name_schema = criticality_schema.get_attribute("name") @@ -194,9 +282,9 @@ async def test_get_query_filter_string_value(db: InfrahubDatabase, default_branc ) expected_response = [ "(n)", - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute { name: $attr_description_name })", - "[:HAS_VALUE]", + "-[:HAS_VALUE]-", "(av:AttributeValue { value: $attr_description_value })", ] assert [str(item) for item in filters] == expected_response @@ -207,9 +295,9 @@ async def test_get_query_filter_string_value(db: InfrahubDatabase, default_branc name="description", filter_name="value", filter_value="test", include_match=False ) expected_response = [ - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute { name: $attr_description_name })", - "[:HAS_VALUE]", + "-[:HAS_VALUE]-", "(av:AttributeValue { value: $attr_description_value })", ] assert [str(item) for item in filters] == expected_response @@ -221,9 +309,9 @@ async def test_get_query_filter_any(db: InfrahubDatabase, default_branch: Branch filters, params, matchs = await String.get_query_filter(name="any", filter_name="value", filter_value="test") expected_response = [ "(n)", - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute)", - "[:HAS_VALUE]", + "-[:HAS_VALUE]-", "(av:AttributeValue { value: $attr_any_value })", ] assert [str(item) for item in filters] == expected_response @@ -237,9 +325,9 @@ async def test_get_query_filter_flag_property(db: InfrahubDatabase, default_bran ) expected_response = [ "(n)", - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute { name: $attr_descr_name })", - "[:IS_PROTECTED]", + "-[:IS_PROTECTED]-", "(ap:Boolean { value: $attr_descr_is_protected })", ] assert [str(item) for item in filters] == expected_response @@ -251,9 +339,9 @@ async def test_get_query_filter_any_node_property(db: InfrahubDatabase, default_ filters, params, matchs = await String.get_query_filter(name="any", filter_name="source__id", filter_value="abcdef") expected_response = [ "(n)", - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute)", - "[:HAS_SOURCE]", + "-[:HAS_SOURCE]-", "(ap:CoreNode { uuid: $attr_any_source_id })", ] assert [str(item) for item in filters] == expected_response @@ -261,6 +349,27 @@ async def test_get_query_filter_any_node_property(db: InfrahubDatabase, default_ assert matchs == [] + +async def test_get_query_filter_multiple_values(db: InfrahubDatabase, default_branch: Branch): + filters, params, matchs = await String.get_query_filter( + name="name", filter_name="values", filter_value=["test1", "test2"] + ) + expected_response = [ + "(n)", + "-[:HAS_ATTRIBUTE]-", + "(i:Attribute { name: $attr_name_name })", + "-[:HAS_VALUE]-", +
"(av:AttributeValue)", + ] + assert [str(item) for item in filters] == expected_response + assert params == {"attr_name_name": "name", "attr_name_value": ["test1", "test2"]} + assert matchs == ["av.value IN $attr_name_value"] + + +async def test_get_query_filter_multiple_values_invalid_type(db: InfrahubDatabase, default_branch: Branch): + with pytest.raises(TypeError): + await String.get_query_filter(name="name", filter_name="values", filter_value=["test1", 1.0]) + + async def test_base_serialization(db: InfrahubDatabase, default_branch: Branch, all_attribute_types_schema): obj1 = await Node.init(db=db, schema="TestAllAttributeTypes") await obj1.new(db=db, name="obj1", mystring="abc", mybool=False, myint=123, mylist=["1", 2, False]) diff --git a/backend/tests/unit/core/test_attribute_query.py b/backend/tests/unit/core/test_attribute_query.py new file mode 100644 index 0000000000..c5d9043d48 --- /dev/null +++ b/backend/tests/unit/core/test_attribute_query.py @@ -0,0 +1,15 @@ +from infrahub.core.branch import Branch +from infrahub.core.node import Node +from infrahub.core.query.attribute import AttributeGetQuery +from infrahub.database import InfrahubDatabase + + +async def test_AttributeGetQuery(db: InfrahubDatabase, default_branch: Branch, car_person_schema): + obj = await Node.init(db=db, schema="TestPerson", branch=default_branch) + await obj.new(db=db, name="John", height=180) + await obj.save(db=db) + + query = await AttributeGetQuery.init(db=db, attr=obj.name) + await query.execute(db=db) + + assert query.num_of_results == 3 diff --git a/backend/tests/unit/core/test_branch_diff.py b/backend/tests/unit/core/test_branch_diff.py index 1ca131c9b4..98a2420d11 100644 --- a/backend/tests/unit/core/test_branch_diff.py +++ b/backend/tests/unit/core/test_branch_diff.py @@ -15,6 +15,18 @@ from infrahub.database import InfrahubDatabase from infrahub.message_bus import InfrahubResponse from infrahub.message_bus.rpc import InfrahubRpcClientTesting +from infrahub.services import services + + +@pytest.fixture +def patch_services(helper): + original = services.service.message_bus + bus = helper.get_message_bus_rpc() + services.service.message_bus = bus + services.prepare(service=services.service) + yield bus + services.service.message_bus = original + services.prepare(service=services.service) async def test_diff_has_changes_graph(db: InfrahubDatabase, base_dataset_02): @@ -130,7 +142,9 @@ async def test_diff_get_modified_paths_graph(db: InfrahubDatabase, base_dataset_ assert modified_branch1 == sorted(expected_paths_branch1) -async def test_diff_get_files_repository(db: InfrahubDatabase, rpc_client, repos_in_main, base_dataset_02): +async def test_diff_get_files_repository( + db: InfrahubDatabase, rpc_client, repos_in_main, base_dataset_02, patch_services +): mock_response = InfrahubResponse( response_class="diffnames_response", response_data={ @@ -140,7 +154,7 @@ async def test_diff_get_files_repository(db: InfrahubDatabase, rpc_client, repos }, ) - await rpc_client.add_mock_reply(response=mock_response) + patch_services.add_mock_reply(response=mock_response) branch2 = await create_branch(branch_name="branch2", db=db) @@ -165,7 +179,7 @@ async def test_diff_get_files_repository(db: InfrahubDatabase, rpc_client, repos async def test_diff_get_files_repositories_for_branch_case01( - db: InfrahubDatabase, rpc_client: InfrahubRpcClientTesting, default_branch: Branch, repos_in_main + db: InfrahubDatabase, rpc_client: InfrahubRpcClientTesting, default_branch: Branch, repos_in_main, patch_services ): 
"""Testing the get_modified_paths_repositories_for_branch_case01 method with 2 repositories in the database but only one has a different commit value between 2 and from so we expect only 2 files""" @@ -179,7 +193,7 @@ async def test_diff_get_files_repositories_for_branch_case01( }, ) - await rpc_client.add_mock_reply(response=mock_response) + patch_services.add_mock_reply(response=mock_response) branch2 = await create_branch(branch_name="branch2", db=db) @@ -200,7 +214,7 @@ async def test_diff_get_files_repositories_for_branch_case01( async def test_diff_get_files_repositories_for_branch_case02( - db: InfrahubDatabase, rpc_client: InfrahubRpcClientTesting, default_branch: Branch, repos_in_main + db: InfrahubDatabase, rpc_client: InfrahubRpcClientTesting, default_branch: Branch, repos_in_main, patch_services ): """Testing the get_modified_paths_repositories_for_branch_case01 method with 2 repositories in the database both repositories have a new commit value so we expect both to return something""" @@ -213,7 +227,7 @@ async def test_diff_get_files_repositories_for_branch_case02( "files_added": [], }, ) - await rpc_client.add_mock_reply(response=mock_response) + patch_services.add_mock_reply(response=mock_response) mock_response = InfrahubResponse( response_class="diffnames_response", @@ -223,7 +237,7 @@ async def test_diff_get_files_repositories_for_branch_case02( "files_added": [], }, ) - await rpc_client.add_mock_reply(response=mock_response) + patch_services.add_mock_reply(response=mock_response) branch2 = await create_branch(branch_name="branch2", db=db) @@ -248,7 +262,7 @@ async def test_diff_get_files_repositories_for_branch_case02( async def test_diff_get_files( - db: InfrahubDatabase, rpc_client: InfrahubRpcClientTesting, default_branch: Branch, repos_in_main + db: InfrahubDatabase, rpc_client: InfrahubRpcClientTesting, default_branch: Branch, repos_in_main, patch_services ): """Testing the get_modified_paths_repositories_for_branch_case01 method with 2 repositories in the database both repositories have a new commit value so we expect both to return something""" @@ -261,7 +275,7 @@ async def test_diff_get_files( "files_added": [], }, ) - await rpc_client.add_mock_reply(response=mock_response) + patch_services.add_mock_reply(response=mock_response) mock_response = InfrahubResponse( response_class="diffnames_response", @@ -271,7 +285,7 @@ async def test_diff_get_files( "files_added": [], }, ) - await rpc_client.add_mock_reply(response=mock_response) + patch_services.add_mock_reply(response=mock_response) branch2 = await create_branch(branch_name="branch2", db=db) diff --git a/backend/tests/unit/core/test_branch_merge.py b/backend/tests/unit/core/test_branch_merge.py index d5d658ca32..4828a99864 100644 --- a/backend/tests/unit/core/test_branch_merge.py +++ b/backend/tests/unit/core/test_branch_merge.py @@ -60,7 +60,9 @@ async def test_merge_graph_delete(db: InfrahubDatabase, base_dataset_02, registe assert len(persons) == 2 -async def test_merge_relationship_many(db: InfrahubDatabase, default_branch: Branch, register_core_models_schema): +async def test_merge_relationship_many( + db: InfrahubDatabase, default_branch: Branch, register_core_models_schema, register_organization_schema +): blue = await Node.init(db=db, schema="BuiltinTag", branch=default_branch) await blue.new(db=db, name="Blue", description="The Blue tag") await blue.save(db=db) diff --git a/backend/tests/unit/core/test_branch_rebase.py b/backend/tests/unit/core/test_branch_rebase.py index e9da1141e7..2d0718139f 100644 
--- a/backend/tests/unit/core/test_branch_rebase.py +++ b/backend/tests/unit/core/test_branch_rebase.py @@ -42,7 +42,9 @@ async def test_rebase_graph_delete(db: InfrahubDatabase, base_dataset_02, regist assert len(persons) == 2 -async def test_merge_relationship_many(db: InfrahubDatabase, default_branch: Branch, register_core_models_schema): +async def test_merge_relationship_many( + db: InfrahubDatabase, default_branch: Branch, register_core_models_schema, register_organization_schema +): blue = await Node.init(db=db, schema="BuiltinTag", branch=default_branch) await blue.new(db=db, name="Blue", description="The Blue tag") await blue.save(db=db) diff --git a/backend/tests/unit/core/test_diff_init.py b/backend/tests/unit/core/test_diff_init.py new file mode 100644 index 0000000000..c1e61ec2fc --- /dev/null +++ b/backend/tests/unit/core/test_diff_init.py @@ -0,0 +1,46 @@ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from infrahub.core.branch import Branch, Diff +from infrahub.core.timestamp import Timestamp +from infrahub.exceptions import DiffFromRequiredOnDefaultBranchError, DiffRangeValidationError + + +class TestDiffInit: + def setup_method(self): + self.db = MagicMock() + self.origin_branch = Branch(name="origin") + self.created_at_str = "2023-11-01" + self.created_at_timestamp = Timestamp(self.created_at_str) + self.branch = AsyncMock(spec=Branch) + self.branch.name = "branch" + self.branch.is_default = False + self.branch.created_at = self.created_at_str + self.branch.get_origin_branch.return_value = self.origin_branch + + async def __call_system_under_test(self, branch, **kwargs): + return await Diff.init(self.db, branch, **kwargs) + + async def test_diff_from_required_for_default_branch(self): + self.branch.is_default = True + + with pytest.raises(DiffFromRequiredOnDefaultBranchError): + await self.__call_system_under_test(self.branch) + + async def test_diff_to_cannot_precede_diff_from(self): + bad_diff_to = "2023-10-31" + + with pytest.raises(DiffRangeValidationError): + await self.__call_system_under_test(self.branch, diff_to=bad_diff_to) + + async def test_diff_from_default_is_set(self): + diff_to_str = "2023-11-15" + + diff = await self.__call_system_under_test(self.branch, diff_to=diff_to_str) + + self.branch.get_origin_branch.assert_awaited_once_with(db=self.db) + assert diff.branch == self.branch + assert diff.origin_branch == self.origin_branch + assert diff.diff_from == self.created_at_timestamp + assert diff.diff_to == Timestamp(diff_to_str) diff --git a/backend/tests/unit/core/test_manager_node.py b/backend/tests/unit/core/test_manager_node.py index 189a6869d3..3b1df0e890 100644 --- a/backend/tests/unit/core/test_manager_node.py +++ b/backend/tests/unit/core/test_manager_node.py @@ -1,3 +1,4 @@ +import pytest from infrahub_sdk import UUIDT from infrahub.core import registry @@ -251,6 +252,21 @@ async def test_query_with_filter_string_int( assert len(nodes) == 1 +async def test_query_filter_with_multiple_values_string_int( + db: InfrahubDatabase, + default_branch: Branch, + criticality_schema, + criticality_low: Node, + criticality_medium: Node, + criticality_high: Node, +): + nodes = await NodeManager.query(db=db, schema=criticality_schema, filters={"level__values": [2, 3]}) + assert len(nodes) == 2 + + nodes = await NodeManager.query(db=db, schema=criticality_schema, filters={"name__values": ["medium", "low"]}) + assert len(nodes) == 2 + + async def test_query_with_filter_bool_rel( db: InfrahubDatabase, person_john_main, @@ -272,6 +288,41 @@ async 
def test_query_with_filter_bool_rel( assert len(nodes) == 2 + +async def test_query_filter_with_multiple_values_rel( + db: InfrahubDatabase, + person_john_main, + person_jane_main, + car_accord_main, + car_volt_main, + car_yaris_main, + car_camry_main, + branch: Branch, +): + car = registry.get_schema(name="TestCar") + + nodes = await NodeManager.query(db=db, schema=car, branch=branch, filters={"owner__name__values": ["John", "Jane"]}) + assert len(nodes) == 4 + + +async def test_query_with_multiple_values_invalid_type( + db: InfrahubDatabase, + person_john_main, + person_jane_main, + car_accord_main, + car_volt_main, + car_yaris_main, + car_camry_main, + branch: Branch, +): + car = registry.get_schema(name="TestCar") + + with pytest.raises(TypeError): + await NodeManager.query(db=db, schema=car, branch=branch, filters={"owner__name__values": [1.0]}) + + with pytest.raises(TypeError): + await NodeManager.query(db=db, schema=car, branch=branch, filters={"owner__name__values": [None]}) + + async def test_query_non_default_class( db: InfrahubDatabase, default_branch: Branch, diff --git a/backend/tests/unit/core/test_manager_schema.py b/backend/tests/unit/core/test_manager_schema.py index b4fa7ed1fb..838ae526ca 100644 --- a/backend/tests/unit/core/test_manager_schema.py +++ b/backend/tests/unit/core/test_manager_schema.py @@ -342,11 +342,395 @@ async def test_schema_branch_generate_identifiers(schema_all_in_one): assert generic.relationships[1].identifier == "builtinstatus__infragenericinterface" -async def test_schema_branch_load_schema_extension(db: InfrahubDatabase, default_branch, helper): +async def test_schema_branch_validate_names(): + SCHEMA1 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "name", "kind": "Text", "unique": True}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA1])) + + with pytest.raises(ValueError) as exc: + schema.validate_names() + + assert str(exc.value) == "TestCriticality: Names of attributes and relationships must be unique : ['name']" + + SCHEMA2 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + {"name": "dupname", "kind": "Text"}, + ], + "relationships": [ + {"name": "dupname", "peer": "Criticality", "cardinality": "one"}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA2])) + + with pytest.raises(ValueError) as exc: + schema.validate_names() + + assert str(exc.value) == "TestCriticality: Names of attributes and relationships must be unique : ['dupname']" + + +async def test_schema_branch_validate_identifiers(): + SCHEMA1 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestCriticality", "cardinality": "one"}, + {"name": "second", "peer": "TestCriticality", "cardinality": "one"}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA1])) + schema.generate_identifiers() + + with pytest.raises(ValueError) as exc: + schema.validate_identifiers() + + assert ( + str(exc.value) == 
"TestCriticality: Identifier of relationships must be unique for a given direction > " + "'testcriticality__testcriticality' : [('first', 'bidirectional'), ('second', 'bidirectional')]" + ) + + SCHEMA2 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestCriticality", "cardinality": "one"}, + {"name": "second", "identifier": "something_unique", "peer": "TestCriticality", "cardinality": "one"}, + ], + } + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA2])) + schema.generate_identifiers() + schema.validate_identifiers() + + +async def test_schema_branch_validate_identifiers_direction(): + SCHEMA1 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestCriticality", "cardinality": "one", "direction": "outbound"}, + {"name": "second", "peer": "TestCriticality", "cardinality": "one", "direction": "inbound"}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA1])) + schema.generate_identifiers() + schema.validate_identifiers() + + SCHEMA2 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestCriticality", "cardinality": "one", "direction": "bidirectional"}, + {"name": "second", "peer": "TestCriticality", "cardinality": "one", "direction": "inbound"}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA2])) + schema.generate_identifiers() + with pytest.raises(ValueError) as exc: + schema.validate_identifiers() + + assert ( + str(exc.value) == "TestCriticality: Identifier of relationships must be unique for a given direction > " + "'testcriticality__testcriticality' : [('first', 'bidirectional'), ('second', 'inbound')]" + ) + + +async def test_schema_branch_validate_identifiers_matching_direction(): + SCHEMA = { + "nodes": [ + { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestOther", "cardinality": "one", "direction": "outbound"}, + ], + }, + { + "name": "Other", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestCriticality", "cardinality": "one", "direction": "outbound"}, + ], + }, + ] + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(**SCHEMA)) + schema.generate_identifiers() + with pytest.raises(ValueError) as exc: + schema.validate_identifiers() + + assert ( + str(exc.value) + == "TestOther: Incompatible direction detected on Reverse Relationship for 'first' ('testcriticality__testother') " + "outbound <> outbound" + ) + + SCHEMA["nodes"][0]["relationships"][0]["direction"] = "bidirectional" + schema = 
SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(**SCHEMA)) + schema.generate_identifiers() + with pytest.raises(ValueError) as exc: + schema.validate_identifiers() + + assert ( + str(exc.value) + == "TestOther: Incompatible direction detected on Reverse Relationship for 'first' ('testcriticality__testother') " + "bidirectional <> outbound" + ) + + # Validation is good with inbound <> outbound + SCHEMA["nodes"][0]["relationships"][0]["direction"] = "inbound" + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(**SCHEMA)) + schema.generate_identifiers() + schema.validate_identifiers() + + # Validation is good with bidirectional <> bidirectional + SCHEMA["nodes"][0]["relationships"][0]["direction"] = "bidirectional" + SCHEMA["nodes"][1]["relationships"][0]["direction"] = "bidirectional" + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(**SCHEMA)) + schema.generate_identifiers() + schema.validate_identifiers() + + assert True + + SCHEMA = { + "nodes": [ + { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestOther", "cardinality": "one", "direction": "outbound"}, + {"name": "second", "peer": "TestOther", "cardinality": "one", "direction": "inbound"}, + ], + }, + { + "name": "Other", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestCriticality", "cardinality": "one", "direction": "bidirectional"}, + ], + }, + ] + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(**SCHEMA)) + schema.generate_identifiers() + with pytest.raises(ValueError) as exc: + schema.validate_identifiers() + + assert ( + str(exc.value) + == "TestOther: Incompatible direction detected on Reverse Relationship for 'first' ('testcriticality__testother') > bidirectional " + ) + + +async def test_schema_branch_validate_kinds_peer(): + SCHEMA1 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "TestNotPresent", "cardinality": "one"}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA1])) + + with pytest.raises(ValueError) as exc: + schema.validate_kinds() + + assert str(exc.value) == "TestCriticality: Relationship 'first' is referencing an invalid peer 'TestNotPresent'" + + +async def test_schema_branch_validate_kinds_inherit(): + SCHEMA1 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "inherit_from": ["TestNotPresent"], + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA1])) + + with pytest.raises(ValueError) as exc: + schema.validate_kinds() + + assert str(exc.value) == "TestCriticality: 'TestNotPresent' is not a invalid Generic to inherit from" + + SCHEMA2 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + 
"branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + } + + SCHEMA3 = { + "name": "Other", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "inherit_from": ["TestCriticality"], + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + } + + schema = SchemaBranch(cache={}, name="test") + schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA2, SCHEMA3])) + + with pytest.raises(ValueError) as exc: + schema.validate_kinds() + + assert ( + str(exc.value) + == "TestOther: Only generic model can be used as part of inherit_from, 'TestCriticality' is not a valid entry." + ) + + +async def test_schema_branch_validate_kinds_core(register_core_models_schema: SchemaBranch): + SCHEMA1 = { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "inherit_from": ["LineageOwner"], + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + "relationships": [ + {"name": "first", "peer": "CoreNode", "cardinality": "one"}, + ], + } + + register_core_models_schema.load_schema(schema=SchemaRoot(nodes=[SCHEMA1])) + register_core_models_schema.validate_kinds() + + +async def test_schema_branch_validate_menu_placement(): + """Validate that menu placements points to objects that exists in the schema.""" + FULL_SCHEMA = { + "version": "1.0", + "nodes": [ + { + "name": "Criticality", + "namespace": "Test", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + }, + { + "name": "SubObject", + "namespace": "Test", + "menu_placement": "NoSuchObject", + "default_filter": "name__value", + "branch": BranchSupportType.AWARE.value, + "attributes": [ + {"name": "name", "kind": "Text", "unique": True}, + ], + }, + ], + } + + schema = SchemaBranch(cache={}) + schema.load_schema(schema=SchemaRoot(**FULL_SCHEMA)) + + with pytest.raises(ValueError) as exc: + schema.validate_menu_placements() + + assert str(exc.value) == "TestSubObject: NoSuchObject is not a valid menu placement" + + +async def test_schema_branch_load_schema_extension( + db: InfrahubDatabase, default_branch, organization_schema, builtin_schema, helper +): schema = SchemaRoot(**core_models) schema_branch = SchemaBranch(cache={}, name="test") schema_branch.load_schema(schema=schema) + schema_branch.load_schema(schema=builtin_schema) + schema_branch.load_schema(schema=organization_schema) schema_branch.process() org = schema_branch.get(name="CoreOrganization") @@ -416,6 +800,7 @@ async def test_schema_branch_process_filters( expected_filters = [ {"name": "ids", "kind": FilterSchemaKind.TEXT, "enum": None, "object_kind": None, "description": None}, {"name": "name__value", "kind": FilterSchemaKind.TEXT, "enum": None, "object_kind": None, "description": None}, + {"name": "name__values", "kind": FilterSchemaKind.TEXT, "enum": None, "object_kind": None, "description": None}, { "name": "name__is_visible", "kind": FilterSchemaKind.BOOLEAN, @@ -451,6 +836,13 @@ async def test_schema_branch_process_filters( "object_kind": None, "description": None, }, + { + "name": "level__values", + "kind": FilterSchemaKind.TEXT, + "enum": None, + "object_kind": None, + "description": None, + }, { "name": "level__is_visible", "kind": FilterSchemaKind.BOOLEAN, @@ -480,6 +872,13 @@ async def test_schema_branch_process_filters( "description": None, }, {"name": "color__value", 
"kind": FilterSchemaKind.TEXT, "enum": None, "object_kind": None, "description": None}, + { + "name": "color__values", + "kind": FilterSchemaKind.TEXT, + "enum": None, + "object_kind": None, + "description": None, + }, { "name": "color__is_visible", "kind": FilterSchemaKind.BOOLEAN, @@ -515,6 +914,13 @@ async def test_schema_branch_process_filters( "object_kind": None, "description": None, }, + { + "name": "description__values", + "kind": FilterSchemaKind.TEXT, + "enum": None, + "object_kind": None, + "description": None, + }, { "name": "description__is_visible", "kind": FilterSchemaKind.BOOLEAN, @@ -862,6 +1268,7 @@ async def test_load_schema_to_db_simple_01( db: InfrahubDatabase, default_branch: Branch, register_core_models_schema: SchemaBranch, + register_builtin_models_schema: SchemaBranch, helper, ): schema = SchemaRoot(**helper.schema_file("infra_simple_01.json")) @@ -879,6 +1286,7 @@ async def test_load_schema_to_db_w_generics_01( db: InfrahubDatabase, default_branch: Branch, register_core_models_schema: SchemaBranch, + register_builtin_models_schema: SchemaBranch, helper, ): schema = SchemaRoot(**helper.schema_file("infra_w_generics_01.json")) diff --git a/backend/tests/unit/core/test_node_query.py b/backend/tests/unit/core/test_node_query.py index 91ec5e9d91..77c8d87bf9 100644 --- a/backend/tests/unit/core/test_node_query.py +++ b/backend/tests/unit/core/test_node_query.py @@ -6,6 +6,7 @@ from infrahub.core.node import Node from infrahub.core.query.node import ( NodeCreateAllQuery, + NodeDeleteQuery, NodeGetListQuery, NodeListGetAttributeQuery, NodeListGetInfoQuery, @@ -348,3 +349,18 @@ async def test_query_NodeListGetRelationshipsQuery(db: InfrahubDatabase, default assert person_jack_tags_main.id in result assert "builtintag__testperson" in result[person_jack_tags_main.id] assert len(result[person_jack_tags_main.id]["builtintag__testperson"]) == 2 + + +async def test_query_NodeDeleteQuery( + db: InfrahubDatabase, + default_branch: Branch, + person_jack_tags_main: Node, + tag_blue_main: Node, +): + tags_before = await NodeManager.query(db=db, schema="BuiltinTag", branch=default_branch) + + query = await NodeDeleteQuery.init(db=db, node=tag_blue_main, branch=default_branch) + await query.execute(db=db) + + tags_after = await NodeManager.query(db=db, schema="BuiltinTag", branch=default_branch) + assert len(tags_after) == len(tags_before) - 1 diff --git a/backend/tests/unit/core/test_query.py b/backend/tests/unit/core/test_query.py index 127f983a30..cec421dc03 100644 --- a/backend/tests/unit/core/test_query.py +++ b/backend/tests/unit/core/test_query.py @@ -5,6 +5,7 @@ Query, QueryNode, QueryRel, + QueryRelDirection, QueryResult, cleanup_return_labels, sort_results_by_time, @@ -218,11 +219,18 @@ def test_query_node(): def test_query_rel(): - assert str(QueryRel()) == "[]" - assert str(QueryRel(name="r2")) == "[r2]" - assert str(QueryRel(name="r2", labels=["HAS_VALUE"])) == "[r2:HAS_VALUE]" - assert str(QueryRel(labels=["HAS_VALUE"])) == "[:HAS_VALUE]" - assert str(QueryRel(name="r2", labels=["HAS_VALUE", "IS_RELATED"])) == "[r2:HAS_VALUE:IS_RELATED]" - assert str(QueryRel(name="r2", labels=["HAS_VALUE"], params={"name": "john"})) == '[r2:HAS_VALUE { name: "john" }]' - assert str(QueryRel(labels=["HAS_VALUE"], params={"name": "john"})) == '[:HAS_VALUE { name: "john" }]' - assert str(QueryRel(labels=["HAS_VALUE"], params={"name": "$myvar"})) == "[:HAS_VALUE { name: $myvar }]" + assert str(QueryRel()) == "-[]-" + assert str(QueryRel(name="r2")) == "-[r2]-" + assert 
str(QueryRel(name="r2", direction=QueryRelDirection.INBOUND)) == "<-[r2]-" + assert str(QueryRel(name="r2", direction=QueryRelDirection.OUTBOUND)) == "-[r2]->" + assert str(QueryRel(name="r2", labels=["HAS_VALUE"])) == "-[r2:HAS_VALUE]-" + assert str(QueryRel(labels=["HAS_VALUE"])) == "-[:HAS_VALUE]-" + assert str(QueryRel(name="r2", labels=["HAS_VALUE", "IS_RELATED"])) == "-[r2:HAS_VALUE:IS_RELATED]-" + assert ( + str(QueryRel(name="r2", labels=["HAS_VALUE"], params={"name": "john"})) == '-[r2:HAS_VALUE { name: "john" }]-' + ) + assert str(QueryRel(labels=["HAS_VALUE"], params={"name": "john"})) == '-[:HAS_VALUE { name: "john" }]-' + assert ( + str(QueryRel(labels=["HAS_VALUE"], params={"name": "$myvar"}, direction=QueryRelDirection.OUTBOUND)) + == "-[:HAS_VALUE { name: $myvar }]->" + ) diff --git a/backend/tests/unit/core/test_query_subquery.py b/backend/tests/unit/core/test_query_subquery.py index b6dacbb99e..1b2755c343 100644 --- a/backend/tests/unit/core/test_query_subquery.py +++ b/backend/tests/unit/core/test_query_subquery.py @@ -81,7 +81,7 @@ async def test_build_subquery_filter_relationship(db: InfrahubDatabase, default_ # ruff: noqa: E501 expected_query = """ WITH n - MATCH p = (n)-[f1r1:IS_RELATED]-(rl:Relationship { name: $filter1_rel_name })-[f1r2:IS_RELATED]-(peer:Node)-[f1r3:HAS_ATTRIBUTE]-(i:Attribute { name: $filter1_name })-[f1r4:HAS_VALUE]-(av:AttributeValue { value: $filter1_value }) + MATCH p = (n)-[f1r1:IS_RELATED]->(rl:Relationship { name: $filter1_rel_name })-[f1r2:IS_RELATED]->(peer:Node)-[f1r3:HAS_ATTRIBUTE]-(i:Attribute { name: $filter1_name })-[f1r4:HAS_VALUE]-(av:AttributeValue { value: $filter1_value }) WHERE all(r IN relationships(p) WHERE (PLACEHOLDER)) RETURN n as filter1 ORDER BY f1r1.branch_level DESC, f1r1.from DESC, f1r2.branch_level DESC, f1r2.from DESC, f1r3.branch_level DESC, f1r3.from DESC, f1r4.branch_level DESC, f1r4.from DESC @@ -114,7 +114,7 @@ async def test_build_subquery_filter_relationship_ids(db: InfrahubDatabase, defa # ruff: noqa: E501 expected_query = """ WITH n - MATCH p = (n)-[f1r1:IS_RELATED]-(rl:Relationship { name: $filter1_rel_name })-[f1r2:IS_RELATED]-(peer:Node) + MATCH p = (n)-[f1r1:IS_RELATED]->(rl:Relationship { name: $filter1_rel_name })-[f1r2:IS_RELATED]->(peer:Node) WHERE peer.uuid IN $filter1_peer_ids AND all(r IN relationships(p) WHERE (PLACEHOLDER)) RETURN n as filter1 ORDER BY f1r1.branch_level DESC, f1r1.from DESC, f1r2.branch_level DESC, f1r2.from DESC @@ -141,7 +141,7 @@ async def test_build_subquery_order_relationship(db: InfrahubDatabase, default_b expected_query = """ WITH n - MATCH p = (n)-[ord1r1:IS_RELATED]-(:Relationship { name: $order1_rel_name })-[ord1r2:IS_RELATED]-(:Node)-[ord1r3:HAS_ATTRIBUTE]-(:Attribute { name: $order1_name })-[ord1r4:HAS_VALUE]-(last:AttributeValue) + MATCH p = (n)-[ord1r1:IS_RELATED]->(:Relationship { name: $order1_rel_name })-[ord1r2:IS_RELATED]->(:Node)-[ord1r3:HAS_ATTRIBUTE]-(:Attribute { name: $order1_name })-[ord1r4:HAS_VALUE]-(last:AttributeValue) WHERE all(r IN relationships(p) WHERE (PLACEHOLDER)) RETURN last.value as order1 ORDER BY ord1r1.branch_level DESC, ord1r1.from DESC, ord1r2.branch_level DESC, ord1r2.from DESC, ord1r3.branch_level DESC, ord1r3.from DESC, ord1r4.branch_level DESC, ord1r4.from DESC @@ -150,3 +150,67 @@ async def test_build_subquery_order_relationship(db: InfrahubDatabase, default_b assert query == expected_query assert params == {"order1_name": "name", "order1_rel_name": "testcar__testperson"} assert result_name == "order1" + + +async def 
test_build_subquery_filter_attribute_multiple_values( + db: InfrahubDatabase, default_branch: Branch, all_attribute_types_schema: NodeSchema +): + attr_schema = all_attribute_types_schema.get_attribute(name="mystring") + + query, params, result_name = await build_subquery_filter( + db=db, + field=attr_schema, + name="name", + filter_name="values", + filter_value=["myvalue", "myothervalue"], + branch_filter="PLACEHOLDER", + branch=default_branch, + subquery_idx=1, + ) + + expected_query = """ + WITH n + MATCH p = (n)-[f1r1:HAS_ATTRIBUTE]-(i:Attribute { name: $filter1_name })-[f1r2:HAS_VALUE]-(av:AttributeValue) + WHERE av.value IN $filter1_value AND all(r IN relationships(p) WHERE (PLACEHOLDER)) + RETURN n as filter1 + ORDER BY f1r1.branch_level DESC, f1r1.from DESC, f1r2.branch_level DESC, f1r2.from DESC + LIMIT 1 + """ + assert query == expected_query + assert params == {"filter1_name": "name", "filter1_value": ["myvalue", "myothervalue"]} + assert result_name == "filter1" + + +async def test_build_subquery_filter_relationship_multiple_values( + db: InfrahubDatabase, default_branch: Branch, car_person_schema +): + car_schema = registry.schema.get(name="TestCar") + rel_schema = car_schema.get_relationship(name="owner") + + query, params, result_name = await build_subquery_filter( + db=db, + field=rel_schema, + name="owner", + filter_name="name__values", + filter_value=["john", "jane"], + branch_filter="PLACEHOLDER", + branch=default_branch, + subquery_idx=1, + ) + + # ruff: noqa: E501 + expected_query = """ + WITH n + MATCH p = (n)-[f1r1:IS_RELATED]->(rl:Relationship { name: $filter1_rel_name })-[f1r2:IS_RELATED]->(peer:Node)-[f1r3:HAS_ATTRIBUTE]-(i:Attribute { name: $filter1_name })-[f1r4:HAS_VALUE]-(av:AttributeValue) + WHERE av.value IN $filter1_value AND all(r IN relationships(p) WHERE (PLACEHOLDER)) + RETURN n as filter1 + ORDER BY f1r1.branch_level DESC, f1r1.from DESC, f1r2.branch_level DESC, f1r2.from DESC, f1r3.branch_level DESC, f1r3.from DESC, f1r4.branch_level DESC, f1r4.from DESC + LIMIT 1 + """ + assert query == expected_query + assert params == { + "filter1_name": "name", + "filter1_rel_name": "testcar__testperson", + "filter1_value": ["john", "jane"], + } + assert result_name == "filter1" diff --git a/backend/tests/unit/core/test_schema.py b/backend/tests/unit/core/test_schema.py index 26324cc44e..6ec0b1fc02 100644 --- a/backend/tests/unit/core/test_schema.py +++ b/backend/tests/unit/core/test_schema.py @@ -2,13 +2,14 @@ import pytest from deepdiff import DeepDiff -from pydantic.error_wrappers import ValidationError +from pydantic import ValidationError from infrahub.core import registry from infrahub.core.constants import BranchSupportType from infrahub.core.schema import ( AttributeSchema, BaseSchemaModel, + DropdownChoice, NodeSchema, RelationshipSchema, SchemaRoot, @@ -138,43 +139,6 @@ def test_schema_root_no_generic(): assert SchemaRoot(**FULL_SCHEMA) -def test_node_schema_unique_names(): - SCHEMA = { - "name": "Criticality", - "namespace": "Test", - "default_filter": "name__value", - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - {"name": "name", "kind": "Text", "unique": True}, - ], - } - - with pytest.raises(ValidationError) as exc: - NodeSchema(**SCHEMA) - - assert "Names of attributes and relationships must be unique" in str(exc.value) - - SCHEMA = { - "name": "Criticality", - "namespace": "Test", - "default_filter": "name__value", - "branch": BranchSupportType.AWARE.value, - "attributes": [ - 
{"name": "name", "kind": "Text", "unique": True}, - {"name": "dupname", "kind": "Text"}, - ], - "relationships": [ - {"name": "dupname", "peer": "Criticality", "cardinality": "one"}, - ], - } - - with pytest.raises(ValidationError) as exc: - NodeSchema(**SCHEMA) - - assert "Names of attributes and relationships must be unique" in str(exc.value) - - def test_node_schema_property_unique_attributes(): SCHEMA = { "name": "Criticality", @@ -192,44 +156,6 @@ def test_node_schema_property_unique_attributes(): assert schema.unique_attributes[0].name == "name" -def test_node_schema_unique_identifiers(): - SCHEMA = { - "name": "Criticality", - "namespace": "Test", - "default_filter": "name__value", - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - ], - "relationships": [ - {"name": "first", "peer": "TestCriticality", "cardinality": "one"}, - {"name": "second", "peer": "TestCriticality", "cardinality": "one"}, - ], - } - - with pytest.raises(ValidationError) as exc: - schema = NodeSchema(**SCHEMA) - - assert "Identifier of relationships must be unique" in str(exc.value) - - SCHEMA = { - "name": "Criticality", - "namespace": "Test", - "default_filter": "name__value", - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - ], - "relationships": [ - {"name": "first", "peer": "TestCriticality", "cardinality": "one"}, - {"name": "second", "identifier": "something_unique", "peer": "TestCriticality", "cardinality": "one"}, - ], - } - schema = NodeSchema(**SCHEMA) - assert schema.relationships[0].identifier == "testcriticality__testcriticality" - assert schema.relationships[1].identifier == "something_unique" - - async def test_node_schema_hashable(): SCHEMA = { "name": "Criticality", @@ -301,13 +227,13 @@ async def test_rel_schema_query_filter(db: InfrahubDatabase, default_branch, car filters, params, matchs = await rel.get_query_filter(db=db, filter_name="name__value", filter_value="alice") expected_response = [ "(n)", - "[r1:IS_RELATED]", + "<-[r1:IS_RELATED]-", "(rl:Relationship { name: $rel_cars_rel_name })", - "[r2:IS_RELATED]", + "<-[r2:IS_RELATED]-", "(peer:Node)", - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute { name: $attr_name_name })", - "[:HAS_VALUE]", + "-[:HAS_VALUE]-", "(av:AttributeValue { value: $attr_name_value })", ] assert [str(item) for item in filters] == expected_response @@ -318,9 +244,9 @@ async def test_rel_schema_query_filter(db: InfrahubDatabase, default_branch, car filters, params, matchs = await rel.get_query_filter(db=db, name="bob", filter_name="id", filter_value="XXXX-YYYY") expected_response = [ "(n)", - "[r1:IS_RELATED]", + "<-[r1:IS_RELATED]-", "(rl:Relationship { name: $rel_cars_rel_name })", - "[r2:IS_RELATED]", + "<-[r2:IS_RELATED]-", "(peer:Node { uuid: $rel_cars_peer_id })", ] assert [str(item) for item in filters] == expected_response @@ -336,13 +262,13 @@ async def test_rel_schema_query_filter_no_value(db: InfrahubDatabase, default_br filters, params, matchs = await rel.get_query_filter(db=db, filter_name="name__value") expected_response = [ "(n)", - "[r1:IS_RELATED]", + "<-[r1:IS_RELATED]-", "(rl:Relationship { name: $rel_cars_rel_name })", - "[r2:IS_RELATED]", + "<-[r2:IS_RELATED]-", "(peer:Node)", - "[:HAS_ATTRIBUTE]", + "-[:HAS_ATTRIBUTE]-", "(i:Attribute { name: $attr_name_name })", - "[:HAS_VALUE]", + "-[:HAS_VALUE]-", "(av:AttributeValue)", ] assert [str(item) for item in filters] == expected_response @@ -353,9 +279,9 @@ 
async def test_rel_schema_query_filter_no_value(db: InfrahubDatabase, default_br filters, params, matchs = await rel.get_query_filter(db=db, name="bob", filter_name="id") expected_response = [ "(n)", - "[r1:IS_RELATED]", + "<-[r1:IS_RELATED]-", "(rl:Relationship { name: $rel_cars_rel_name })", - "[r2:IS_RELATED]", + "<-[r2:IS_RELATED]-", "(peer:Node)", ] assert [str(item) for item in filters] == expected_response @@ -369,3 +295,36 @@ def test_core_models(): def test_internal_schema(): assert SchemaRoot(**internal_schema) + + +async def test_attribute_schema_choices_invalid_kind(): + SCHEMA = {"name": "name", "kind": "Text", "choices": [DropdownChoice(name="active", color="#AAbb0f")]} + + with pytest.raises(ValidationError) as exc: + AttributeSchema(**SCHEMA) + + assert "Can only specify 'choices' for kind=Dropdown" in str(exc.value) + + +async def test_attribute_schema_dropdown_missing_choices(): + SCHEMA = {"name": "name", "kind": "Dropdown"} + + with pytest.raises(ValidationError) as exc: + AttributeSchema(**SCHEMA) + + assert "The property 'choices' is required for kind=Dropdown" in str(exc.value) + + +def test_dropdown_choice_colors(): + active = DropdownChoice(name="active", color="#AAbb0f") + assert active.color == "#aabb0f" + with pytest.raises(ValidationError) as exc: + DropdownChoice(name="active", color="off-white") + + assert "Color must be a valid HTML color code" in str(exc.value) + + +def test_dropdown_choice_sort(): + active = DropdownChoice(name="active", color="#AAbb0f") + passive = DropdownChoice(name="passive", color="#AAbb0f") + assert active < passive diff --git a/backend/tests/unit/core/test_schema_manager.py b/backend/tests/unit/core/test_schema_manager.py deleted file mode 100644 index 931eace9b0..0000000000 --- a/backend/tests/unit/core/test_schema_manager.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest - -from infrahub.core.constants import BranchSupportType -from infrahub.core.schema import SchemaRoot -from infrahub.core.schema_manager import SchemaBranch - - -async def test_schema_menu_placement_errors(): - """Validate that menu placements points to objects that exists in the schema.""" - FULL_SCHEMA = { - "version": "1.0", - "nodes": [ - { - "name": "Criticality", - "namespace": "Test", - "default_filter": "name__value", - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - ], - }, - { - "name": "SubObject", - "namespace": "Test", - "menu_placement": "NoSuchObject", - "default_filter": "name__value", - "branch": BranchSupportType.AWARE.value, - "attributes": [ - {"name": "name", "kind": "Text", "unique": True}, - ], - }, - ], - } - - schema = SchemaBranch(cache={}) - schema.load_schema(schema=SchemaRoot(**FULL_SCHEMA)) - - with pytest.raises(ValueError) as exc: - schema.process() - - assert str(exc.value) == "TestSubObject: NoSuchObject is not a valid menu placement" diff --git a/backend/tests/unit/graphql/auth/__init__.py b/backend/tests/unit/graphql/auth/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/backend/tests/unit/graphql/auth/test_anonymous_checker.py b/backend/tests/unit/graphql/auth/test_anonymous_checker.py new file mode 100644 index 0000000000..e8f110efaf --- /dev/null +++ b/backend/tests/unit/graphql/auth/test_anonymous_checker.py @@ -0,0 +1,38 @@ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from infrahub.auth import AccountSession, AuthType +from infrahub.exceptions import AuthorizationError +from infrahub.graphql.analyzer import 
GraphQLQueryAnalyzer +from infrahub.graphql.auth.query_permission_checker.anonymous_checker import AnonymousGraphQLPermissionChecker + + +class TestAnonymousAuthChecker: + def setup_method(self): + self.account_session = AccountSession(account_id="abc", auth_type=AuthType.JWT) + self.graphql_query = AsyncMock(spec=GraphQLQueryAnalyzer) + self.mock_anonymous_setting_get = MagicMock(return_value=True) + self.checker = AnonymousGraphQLPermissionChecker(self.mock_anonymous_setting_get) + + @pytest.mark.parametrize("is_authenticated,is_supported", [(True, False), (False, True)]) + async def test_supports_unauthenticated_accounts(self, is_authenticated, is_supported): + self.account_session.authenticated = is_authenticated + + has_support = await self.checker.supports(self.account_session) + + assert is_supported is has_support + + @pytest.mark.parametrize("anonymous_setting,query_has_mutations", [(False, False), (False, True), (True, True)]) + async def test_failures_raise_error(self, anonymous_setting, query_has_mutations): + self.mock_anonymous_setting_get.return_value = anonymous_setting + self.graphql_query.contains_mutation = query_has_mutations + + with pytest.raises(AuthorizationError): + await self.checker.check(self.graphql_query) + + async def test_check_passes(self): + self.mock_anonymous_setting_get.return_value = True + self.graphql_query.contains_mutation = False + + await self.checker.check(self.graphql_query) diff --git a/backend/tests/unit/graphql/auth/test_default_checker.py b/backend/tests/unit/graphql/auth/test_default_checker.py new file mode 100644 index 0000000000..a44dcc32d9 --- /dev/null +++ b/backend/tests/unit/graphql/auth/test_default_checker.py @@ -0,0 +1,28 @@ +from unittest.mock import AsyncMock + +import pytest + +from infrahub.auth import AccountSession, AuthType +from infrahub.core.constants import AccountRole +from infrahub.exceptions import AuthorizationError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer +from infrahub.graphql.auth.query_permission_checker.default_checker import DefaultGraphQLPermissionChecker + + +class TestDefaultAuthChecker: + def setup_method(self): + self.account_session = AccountSession(account_id="abc", auth_type=AuthType.JWT) + self.graphql_query = AsyncMock(spec=GraphQLQueryAnalyzer) + self.checker = DefaultGraphQLPermissionChecker() + + @pytest.mark.parametrize("role", [x.value for x in AccountRole]) + async def test_supports_all_accounts(self, role): + self.account_session.role = role + + is_supported = await self.checker.supports(self.account_session) + + assert is_supported is True + + async def test_always_raises_error(self): + with pytest.raises(AuthorizationError): + await self.checker.check(self.graphql_query) diff --git a/backend/tests/unit/graphql/auth/test_parent_checker.py b/backend/tests/unit/graphql/auth/test_parent_checker.py new file mode 100644 index 0000000000..b9608327a6 --- /dev/null +++ b/backend/tests/unit/graphql/auth/test_parent_checker.py @@ -0,0 +1,41 @@ +from unittest.mock import AsyncMock + +import pytest + +from infrahub.auth import AccountSession, AuthType +from infrahub.core.constants import AccountRole +from infrahub.exceptions import PermissionDeniedError +from infrahub.graphql.analyzer import GraphQLQueryAnalyzer +from infrahub.graphql.auth.query_permission_checker.checker import GraphQLQueryPermissionChecker +from infrahub.graphql.auth.query_permission_checker.interface import GraphQLQueryPermissionCheckerInterface + + +class TestParentAuthChecker: + def setup_method(self): + 
self.account_session = AccountSession(account_id="abc", auth_type=AuthType.JWT, role=AccountRole.ADMIN) + self.graphql_query = AsyncMock(spec=GraphQLQueryAnalyzer) + self.sub_auth_checker_one = AsyncMock(spec=GraphQLQueryPermissionCheckerInterface) + self.sub_auth_checker_two = AsyncMock(spec=GraphQLQueryPermissionCheckerInterface) + self.sub_auth_checker_one.supports.return_value = False + self.sub_auth_checker_two.supports.return_value = True + self.parent_checker = GraphQLQueryPermissionChecker([self.sub_auth_checker_one, self.sub_auth_checker_two]) + + async def __call_system_under_test(self): + await self.parent_checker.check(self.account_session, self.graphql_query) + + async def test_only_checks_one(self): + await self.__call_system_under_test() + + self.sub_auth_checker_one.supports.assert_awaited_once_with(self.account_session) + self.sub_auth_checker_two.supports.assert_awaited_once_with(self.account_session) + self.sub_auth_checker_one.check.assert_not_awaited() + self.sub_auth_checker_two.check.assert_awaited_once_with(self.graphql_query) + + async def test_error_if_no_support(self): + self.sub_auth_checker_two.supports.return_value = False + + with pytest.raises(PermissionDeniedError): + await self.__call_system_under_test() + + self.sub_auth_checker_one.check.assert_not_awaited() + self.sub_auth_checker_two.check.assert_not_awaited() diff --git a/backend/tests/unit/graphql/auth/test_read_only_checker.py b/backend/tests/unit/graphql/auth/test_read_only_checker.py new file mode 100644 index 0000000000..43db65de93 --- /dev/null +++ b/backend/tests/unit/graphql/auth/test_read_only_checker.py @@ -0,0 +1,54 @@ +from unittest.mock import AsyncMock + +import pytest +from graphql import OperationType + +from infrahub.auth import AccountSession, AuthType +from infrahub.core.constants import AccountRole +from infrahub.exceptions import PermissionDeniedError +from infrahub.graphql.analyzer import GraphQLOperation, GraphQLQueryAnalyzer +from infrahub.graphql.auth.query_permission_checker.read_only_checker import ReadOnlyGraphQLPermissionChecker + + +class TestReadOnlyAuthChecker: + def setup_method(self): + self.account_session = AccountSession(account_id="abc", auth_type=AuthType.JWT, role=AccountRole.READ_ONLY) + self.graphql_query = AsyncMock(spec=GraphQLQueryAnalyzer) + self.checker = ReadOnlyGraphQLPermissionChecker() + + @pytest.mark.parametrize("role", [AccountRole.ADMIN, AccountRole.READ_WRITE]) + async def test_doesnt_supports_other_accounts(self, role): + self.account_session.role = role + + is_supported = await self.checker.supports(self.account_session) + + assert is_supported is False + + async def test_supports_read_only_accounts(self): + self.account_session.role = AccountRole.READ_ONLY + + is_supported = await self.checker.supports(self.account_session) + + assert is_supported is True + + async def test_illegal_mutation_raises_error(self): + self.graphql_query.contains_mutation = True + self.graphql_query.operations = [ + GraphQLOperation(name="ThisIsNotAllowed", operation_type=OperationType.MUTATION) + ] + + with pytest.raises(PermissionDeniedError): + await self.checker.check(self.graphql_query) + + async def test_legal_mutation_is_okay(self): + self.checker.allowed_readonly_mutations = ["ThisIsAllowed"] + self.graphql_query.contains_mutation = True + self.graphql_query.operations = [GraphQLOperation(name="ThisIsAllowed", operation_type=OperationType.MUTATION)] + + await self.checker.check(self.graphql_query) + + async def test_query_is_okay(self): + 
+        self.graphql_query.contains_mutation = False
+        self.graphql_query.operations = [GraphQLOperation(name="ThisIsAQuery", operation_type=OperationType.QUERY)]
+
+        await self.checker.check(self.graphql_query)
diff --git a/backend/tests/unit/graphql/auth/test_read_write_checker.py b/backend/tests/unit/graphql/auth/test_read_write_checker.py
new file mode 100644
index 0000000000..08f9b7eddf
--- /dev/null
+++ b/backend/tests/unit/graphql/auth/test_read_write_checker.py
@@ -0,0 +1,36 @@
+from unittest.mock import AsyncMock
+
+import pytest
+
+from infrahub.auth import AccountSession, AuthType
+from infrahub.core.constants import AccountRole
+from infrahub.graphql.analyzer import GraphQLQueryAnalyzer
+from infrahub.graphql.auth.query_permission_checker.read_write_checker import ReadWriteGraphQLPermissionChecker
+
+
+class TestReadWriteAuthChecker:
+    def setup_method(self):
+        self.account_session = AccountSession(account_id="abc", auth_type=AuthType.JWT, role=AccountRole.ADMIN)
+        self.graphql_query = AsyncMock(spec=GraphQLQueryAnalyzer)
+        self.checker = ReadWriteGraphQLPermissionChecker()
+
+    @pytest.mark.parametrize("role", [AccountRole.ADMIN, AccountRole.READ_WRITE])
+    async def test_supports_readwrite_accounts(self, role):
+        self.account_session.role = role
+
+        is_supported = await self.checker.supports(self.account_session)
+
+        assert is_supported is True
+
+    async def test_doesnt_support_readonly_accounts(self):
+        self.account_session.role = AccountRole.READ_ONLY
+
+        is_supported = await self.checker.supports(self.account_session)
+
+        assert is_supported is False
+
+    @pytest.mark.parametrize("contains_mutations", [True, False])
+    async def test_never_raises_error(self, contains_mutations):
+        self.graphql_query.contains_mutation = contains_mutations
+
+        await self.checker.check(self.graphql_query)
diff --git a/backend/tests/unit/graphql/conftest.py b/backend/tests/unit/graphql/conftest.py
index 72efc3aea1..206bca0d44 100644
--- a/backend/tests/unit/graphql/conftest.py
+++ b/backend/tests/unit/graphql/conftest.py
@@ -220,3 +220,109 @@ def bad_query_01() -> str:
     }
     """
     return query
+
+
+@pytest.fixture
+def query_introspection() -> str:
+    query = """
+    query IntrospectionQuery {
+        __schema {
+            queryType {
+                name
+            }
+            mutationType {
+                name
+            }
+            subscriptionType {
+                name
+            }
+            types {
+                ...FullType
+            }
+            directives {
+                name
+                description
+                locations
+                args {
+                    ...InputValue
+                }
+            }
+        }
+    }
+
+    fragment FullType on __Type {
+        kind
+        name
+        description
+        fields(includeDeprecated: true) {
+            name
+            description
+            args {
+                ...InputValue
+            }
+            type {
+                ...TypeRef
+            }
+            isDeprecated
+            deprecationReason
+        }
+        inputFields {
+            ...InputValue
+        }
+        interfaces {
+            ...TypeRef
+        }
+        enumValues(includeDeprecated: true) {
+            name
+            description
+            isDeprecated
+            deprecationReason
+        }
+        possibleTypes {
+            ...TypeRef
+        }
+    }
+
+    fragment InputValue on __InputValue {
+        name
+        description
+        type {
+            ...TypeRef
+        }
+        defaultValue
+    }
+
+    fragment TypeRef on __Type {
+        kind
+        name
+        ofType {
+            kind
+            name
+            ofType {
+                kind
+                name
+                ofType {
+                    kind
+                    name
+                    ofType {
+                        kind
+                        name
+                        ofType {
+                            kind
+                            name
+                            ofType {
+                                kind
+                                name
+                                ofType {
+                                    kind
+                                    name
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    """
+    return query
diff --git a/backend/tests/unit/graphql/mutations/__init__.py b/backend/tests/unit/graphql/mutations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/backend/tests/unit/graphql/mutations/test_schema.py b/backend/tests/unit/graphql/mutations/test_schema.py
new file mode 100644
index 0000000000..d07bbc0f46
--- /dev/null
+++ b/backend/tests/unit/graphql/mutations/test_schema.py
@@ -0,0 +1,200 @@
+import pytest
+from graphql import graphql
+
+from infrahub.core.node import Node
+from infrahub.core.schema import GroupSchema
+from infrahub.database import InfrahubDatabase
+from infrahub.exceptions import ValidationError
+from infrahub.graphql import generate_graphql_schema
+from infrahub.graphql.mutations.schema import validate_kind, validate_kind_dropdown, validate_kind_enum
+
+
+async def test_delete_last_dropdown_option(db: InfrahubDatabase, default_branch, choices_schema):
+    query = """
+    mutation {
+        SchemaDropdownRemove(data: {kind: "TestChoice", attribute: "temperature_scale", dropdown: "celsius"}) {
+            ok
+        }
+    }
+    """
+    result = await graphql(
+        schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch),
+        source=query,
+        context_value={"infrahub_database": db, "infrahub_branch": default_branch},
+        root_value=None,
+        variable_values={},
+    )
+    assert result.errors
+    assert len(result.errors) == 1
+    assert "Unable to remove the last dropdown on TestChoice in attribute temperature_scale" in str(result.errors[0])
+
+
+async def test_delete_last_enum_option(db: InfrahubDatabase, default_branch, choices_schema):
+    query = """
+    mutation {
+        SchemaEnumRemove(data: {kind: "BaseChoice", attribute: "measuring_system", enum: "metric"}) {
+            ok
+        }
+    }
+    """
+    result = await graphql(
+        schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch),
+        source=query,
+        context_value={"infrahub_database": db, "infrahub_branch": default_branch},
+        root_value=None,
+        variable_values={},
+    )
+    assert result.errors
+    assert len(result.errors) == 1
+    assert "Unable to remove the last enum on BaseChoice in attribute measuring_system" in str(result.errors[0])
+
+
+async def test_delete_enum_option_that_does_not_exist(db: InfrahubDatabase, default_branch, choices_schema):
+    query = """
+    mutation {
+        SchemaEnumRemove(data: {kind: "BaseChoice", attribute: "color", enum: "yellow"}) {
+            ok
+        }
+    }
+    """
+    result = await graphql(
+        schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch),
+        source=query,
+        context_value={"infrahub_database": db, "infrahub_branch": default_branch},
+        root_value=None,
+        variable_values={},
+    )
+    assert result.errors
+    assert len(result.errors) == 1
+    assert "The enum value yellow does not exist on BaseChoice in attribute color" in str(result.errors[0])
+
+
+async def test_delete_dropdown_option_that_does_not_exist(db: InfrahubDatabase, default_branch, choices_schema):
+    query = """
+    mutation {
+        SchemaDropdownRemove(data: {kind: "BaseChoice", attribute: "section", dropdown: "ci"}) {
+            ok
+        }
+    }
+    """
+    result = await graphql(
+        schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch),
+        source=query,
+        context_value={"infrahub_database": db, "infrahub_branch": default_branch},
+        root_value=None,
+        variable_values={},
+    )
+    assert result.errors
+    assert len(result.errors) == 1
+    assert "The dropdown value ci does not exist on BaseChoice in attribute section" in str(result.errors[0])
+
+
+async def test_add_enum_option_that_exists(db: InfrahubDatabase, default_branch, choices_schema):
+    query = """
+    mutation {
+        SchemaEnumAdd(data: {kind: "BaseChoice", attribute: "color", enum: "red"}) {
+            ok
+        }
+    }
+    """
+    result = await graphql(
+        schema=await generate_graphql_schema(db=db, include_subscription=False,
branch=default_branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": default_branch}, + root_value=None, + variable_values={}, + ) + assert result.errors + assert len(result.errors) == 1 + assert "The enum value red already exists on BaseChoice in attribute color" in str(result.errors[0]) + + +async def test_delete_dropdown_option_in_use(db: InfrahubDatabase, default_branch, choices_schema): + obj1 = await Node.init(db=db, schema="TestChoice") + await obj1.new(db=db, name="test-passive-01", status="passive", temperature_scale="celsius") + await obj1.save(db=db) + + query = """ + mutation { + SchemaDropdownRemove(data: {kind: "TestChoice", attribute: "temperature_scale", dropdown: "celsius"}) { + ok + } + } + """ + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": default_branch}, + root_value=None, + variable_values={}, + ) + assert result.errors + assert len(result.errors) == 1 + assert "There are still TestChoice objects using this dropdown" in str(result.errors[0]) + + +async def test_delete_enum_option_in_use(db: InfrahubDatabase, default_branch, choices_schema): + obj1 = await Node.init(db=db, schema="TestChoice") + await obj1.new(db=db, name="test-passive-01", status="passive") + await obj1.save(db=db) + + query = """ + mutation { + SchemaEnumRemove(data: {kind: "TestChoice", attribute: "status", enum: "passive"}) { + ok + } + } + """ + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": default_branch}, + root_value=None, + variable_values={}, + ) + assert result.errors + assert len(result.errors) == 1 + assert "There are still TestChoice objects using this enum" in str(result.errors[0]) + + +async def test_validate_kind_exceptions(db: InfrahubDatabase, choices_schema): + node = await Node.init(db=db, schema="TestChoice") + restricted_node = await Node.init(db=db, schema="LineageOwner") + group_schema = GroupSchema(id="blank", name="dummy", kind="Dummy", description="") + + with pytest.raises(ValidationError) as exc: + validate_kind(kind=group_schema, attribute="status") + assert "Dummy is not a valid node" in str(exc.value) + + with pytest.raises(ValidationError) as exc: + validate_kind(kind=restricted_node._schema, attribute="status") + + assert "Operation not allowed for LineageOwner in restricted namespace Lineage" in str(exc.value) + + with pytest.raises(ValidationError) as exc: + validate_kind(kind=node._schema, attribute="no_attribute") + + assert "Attribute no_attribute does not exist on TestChoice" in str(exc.value) + + with pytest.raises(ValidationError) as exc: + validate_kind(kind=node._schema, attribute="color") + + assert "Attribute color on TestChoice is inherited and must be changed on the generic" in str(exc.value) + + +async def test_validate_kind_dropdown_exceptions(db: InfrahubDatabase, choices_schema): + node = await Node.init(db=db, schema="TestChoice") + + with pytest.raises(ValidationError) as exc: + validate_kind_dropdown(kind=node._schema, attribute="comment") + + assert "Attribute comment on TestChoice is not a Dropdown" in str(exc.value) + + +async def test_validate_kind_enum_exceptions(db: InfrahubDatabase, choices_schema): + node = await Node.init(db=db, schema="TestChoice") + + with pytest.raises(ValidationError) as exc: + 
validate_kind_enum(kind=node._schema, attribute="comment") + + assert "Attribute comment on TestChoice is not an enum" in str(exc.value) diff --git a/backend/tests/unit/graphql/test_core_account_self_update.py b/backend/tests/unit/graphql/test_core_account_self_update.py new file mode 100644 index 0000000000..d5edac3e8a --- /dev/null +++ b/backend/tests/unit/graphql/test_core_account_self_update.py @@ -0,0 +1,42 @@ +import bcrypt +import pytest +from graphql import graphql + +from infrahub.auth import AccountSession, AuthType +from infrahub.core.branch import Branch +from infrahub.core.constants import AccountRole +from infrahub.core.manager import NodeManager +from infrahub.database import InfrahubDatabase +from infrahub.graphql import generate_graphql_schema + + +@pytest.mark.parametrize("role", [e.value for e in AccountRole]) +async def test_everyone_can_update_password(db: InfrahubDatabase, default_branch: Branch, first_account, role): + new_password = "NewP@ssw0rd" + new_description = "what a cool description" + query = """ + mutation { + CoreAccountSelfUpdate(data: {password: "%s", description: "%s"}) { + ok + } + } + """ % (new_password, new_description) + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch), + source=query, + context_value={ + "infrahub_database": db, + "account_session": AccountSession( + authenticated=True, account_id=first_account.id, role=role, auth_type=AuthType.JWT + ), + }, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert result.data["CoreAccountSelfUpdate"]["ok"] is True + + updated_account = await NodeManager.get_one(db=db, id=first_account.id, branch=default_branch) + assert bcrypt.checkpw(new_password.encode("UTF-8"), updated_account.password.value.encode("UTF-8")) + assert updated_account.description.value == new_description diff --git a/backend/tests/unit/graphql/test_generator.py b/backend/tests/unit/graphql/test_generator.py index 87dfc42fd1..901dc0d48a 100644 --- a/backend/tests/unit/graphql/test_generator.py +++ b/backend/tests/unit/graphql/test_generator.py @@ -8,7 +8,9 @@ from infrahub.graphql.generator import ( generate_filters, generate_graphql_mutation_create, + generate_graphql_mutation_create_input, generate_graphql_mutation_update, + generate_graphql_mutation_update_input, generate_graphql_object, generate_interface_object, generate_object_types, @@ -20,7 +22,7 @@ async def test_input_type_registration(): - assert registry.input_type is not {} + assert registry.input_type is not {} # noqa async def test_generate_interface_object(db: InfrahubDatabase, default_branch: Branch, generic_vehicule_schema): @@ -79,6 +81,7 @@ async def test_generate_graphql_object(db: InfrahubDatabase, default_branch: Bra "level", "mylist", "name", + "status", ] @@ -105,7 +108,8 @@ async def test_generate_graphql_object_with_interface( async def test_generate_graphql_mutation_create( db: InfrahubDatabase, default_branch: Branch, group_graphql, criticality_schema ): - result = generate_graphql_mutation_create(schema=criticality_schema, branch=default_branch) + input_type = generate_graphql_mutation_create_input(criticality_schema) + result = generate_graphql_mutation_create(schema=criticality_schema, branch=default_branch, input_type=input_type) assert result._meta.name == "TestCriticalityCreate" assert sorted(list(result._meta.fields.keys())) == ["object", "ok"] @@ -113,7 +117,8 @@ async def test_generate_graphql_mutation_create( async def 
test_generate_graphql_mutation_update( db: InfrahubDatabase, default_branch: Branch, group_graphql, criticality_schema ): - result = generate_graphql_mutation_update(schema=criticality_schema, branch=default_branch) + input_type = generate_graphql_mutation_update_input(schema=criticality_schema) + result = generate_graphql_mutation_update(schema=criticality_schema, branch=default_branch, input_type=input_type) assert result._meta.name == "TestCriticalityUpdate" assert sorted(list(result._meta.fields.keys())) == ["object", "ok"] @@ -195,39 +200,51 @@ async def test_generate_filters( "any__owner__id", "any__source__id", "any__value", + "any__values", "cars__color__is_protected", "cars__color__is_visible", "cars__color__owner__id", "cars__color__source__id", "cars__color__value", + "cars__color__values", "cars__ids", "cars__name__is_protected", "cars__name__is_visible", "cars__name__owner__id", "cars__name__source__id", "cars__name__value", + "cars__name__values", "cars__nbr_seats__is_protected", "cars__nbr_seats__is_visible", "cars__nbr_seats__owner__id", "cars__nbr_seats__source__id", "cars__nbr_seats__value", + "cars__nbr_seats__values", "height__is_protected", "height__is_visible", "height__owner__id", "height__source__id", "height__value", + "height__values", "member_of_groups__description__value", + "member_of_groups__description__values", "member_of_groups__ids", "member_of_groups__label__value", + "member_of_groups__label__values", "member_of_groups__name__value", + "member_of_groups__name__values", "name__is_protected", "name__is_visible", "name__owner__id", "name__source__id", "name__value", + "name__values", "subscriber_of_groups__description__value", + "subscriber_of_groups__description__values", "subscriber_of_groups__ids", "subscriber_of_groups__label__value", + "subscriber_of_groups__label__values", "subscriber_of_groups__name__value", + "subscriber_of_groups__name__values", ] assert sorted(list(filters.keys())) == sorted(expected_filters) diff --git a/backend/tests/unit/graphql/test_graphql_branch.py b/backend/tests/unit/graphql/test_graphql_branch.py index 96161dcfcc..85cd911754 100644 --- a/backend/tests/unit/graphql/test_graphql_branch.py +++ b/backend/tests/unit/graphql/test_graphql_branch.py @@ -208,17 +208,14 @@ async def test_branch_query( root_value=None, variable_values={}, ) - name_query = ( - """ + name_query = """ query { Branch(name: "%s" ) { id name } } - """ - % branch3["name"] - ) + """ % branch3["name"] name_response = await graphql( schema, source=name_query, @@ -233,9 +230,7 @@ async def test_branch_query( name } } - """ % [ - branch3["id"] - ] + """ % [branch3["id"]] id_query = id_query.replace("'", '"') id_response = await graphql( diff --git a/backend/tests/unit/graphql/test_graphql_query.py b/backend/tests/unit/graphql/test_graphql_query.py index 95761dd226..be3f684745 100644 --- a/backend/tests/unit/graphql/test_graphql_query.py +++ b/backend/tests/unit/graphql/test_graphql_query.py @@ -1158,6 +1158,99 @@ async def test_query_filter_relationship_id(db: InfrahubDatabase, default_branch assert len(result.data["TestPerson"]["edges"][0]["node"]["cars"]["edges"]) == 2 +async def test_query_attribute_multiple_values(db: InfrahubDatabase, default_branch: Branch, car_person_schema): + person = registry.get_schema(name="TestPerson") + + p1 = await Node.init(db=db, schema=person) + await p1.new(db=db, name="John", height=180) + await p1.save(db=db) + p2 = await Node.init(db=db, schema=person) + + await p2.new(db=db, name="Jane", height=170) + await p2.save(db=db) 
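+    # `name__values` is the list-valued filter introduced by this change; per the
+    # build_subquery_filter tests above, it is expected to compile to a Cypher
+    # `av.value IN $filter1_value` clause, matching either listed name.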
+ + query = """ + query { + TestPerson(name__values: ["John", "Jane"]) { + count + } + } + """ + + result = await graphql( + await generate_graphql_schema(branch=default_branch, db=db, include_mutation=False, include_subscription=False), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": default_branch}, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert result.data["TestPerson"]["count"] == 2 + + +async def test_query_relationship_multiple_values(db: InfrahubDatabase, default_branch: Branch, car_person_schema): + car = registry.get_schema(name="TestCar") + person = registry.get_schema(name="TestPerson") + + p1 = await Node.init(db=db, schema=person) + await p1.new(db=db, name="John", height=180) + await p1.save(db=db) + p2 = await Node.init(db=db, schema=person) + + await p2.new(db=db, name="Jane", height=170) + await p2.save(db=db) + + c1 = await Node.init(db=db, schema=car) + await c1.new(db=db, name="volt", nbr_seats=4, is_electric=True, owner=p1) + await c1.save(db=db) + c2 = await Node.init(db=db, schema=car) + await c2.new(db=db, name="bolt", nbr_seats=4, is_electric=True, owner=p1) + await c2.save(db=db) + c3 = await Node.init(db=db, schema=car) + await c3.new(db=db, name="nolt", nbr_seats=4, is_electric=True, owner=p2) + await c3.save(db=db) + c4 = await Node.init(db=db, schema=car) + await c4.new(db=db, name="yaris", nbr_seats=5, is_electric=False, owner=p1) + await c4.save(db=db) + + query = """ + query { + TestPerson { + edges { + node { + name { + value + } + cars (name__values: ["volt", "nolt"]) { + edges { + node { + name { + value + } + } + } + } + } + } + } + } + """ + # (name__values: ["John", "Jane"]) + result = await graphql( + await generate_graphql_schema(branch=default_branch, db=db, include_mutation=False, include_subscription=False), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": default_branch}, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert len(result.data["TestPerson"]["edges"]) == 2 + assert result.data["TestPerson"]["edges"][0]["node"]["cars"]["edges"][0]["node"]["name"]["value"] == "volt" + assert result.data["TestPerson"]["edges"][1]["node"]["cars"]["edges"][0]["node"]["name"]["value"] == "nolt" + + async def test_query_oneway_relationship(db: InfrahubDatabase, default_branch: Branch, person_tag_schema): t1 = await Node.init(db=db, schema="BuiltinTag") await t1.new(db=db, name="Blue", description="The Blue tag") diff --git a/backend/tests/unit/graphql/test_mutation_artifact_definition.py b/backend/tests/unit/graphql/test_mutation_artifact_definition.py index fab7b11cdf..d946353a9a 100644 --- a/backend/tests/unit/graphql/test_mutation_artifact_definition.py +++ b/backend/tests/unit/graphql/test_mutation_artifact_definition.py @@ -142,9 +142,7 @@ async def test_update_artifact_definition( } } } - """ % ( - definition1.id - ) + """ % (definition1.id) result = await graphql( schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), diff --git a/backend/tests/unit/graphql/test_mutation_graphqlquery.py b/backend/tests/unit/graphql/test_mutation_graphqlquery.py index 8db07e250c..1eb34caa9e 100644 --- a/backend/tests/unit/graphql/test_mutation_graphqlquery.py +++ b/backend/tests/unit/graphql/test_mutation_graphqlquery.py @@ -47,11 +47,7 @@ async def test_create_query_no_vars(db: InfrahubDatabase, default_branch, regist } } } - """ % query_value.replace( - "\n", " " - ).replace( - '"', '\\"' - ) + """ % 
query_value.replace("\n", " ").replace('"', '\\"') result = await graphql( schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch), @@ -116,11 +112,7 @@ async def test_create_query_with_vars(db: InfrahubDatabase, default_branch, regi } } } - """ % query_value.replace( - "\n", " " - ).replace( - '"', '\\"' - ) + """ % query_value.replace("\n", " ").replace('"', '\\"') result = await graphql( schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch), @@ -276,9 +268,7 @@ async def test_update_query_no_update(db: InfrahubDatabase, default_branch, regi } } } - """ % ( - obj.id - ) + """ % (obj.id) result = await graphql( schema=await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch), diff --git a/backend/tests/unit/graphql/test_mutation_update.py b/backend/tests/unit/graphql/test_mutation_update.py index cea6164f67..2832ee8e08 100644 --- a/backend/tests/unit/graphql/test_mutation_update.py +++ b/backend/tests/unit/graphql/test_mutation_update.py @@ -353,9 +353,7 @@ async def test_update_delete_optional_relationship_cardinality_one( } } } - """ % ( - car_accord_main.id, - ) + """ % (car_accord_main.id,) result = await graphql( schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), source=query, diff --git a/backend/tests/unit/graphql/test_mutation_upsert.py b/backend/tests/unit/graphql/test_mutation_upsert.py new file mode 100644 index 0000000000..54083aac71 --- /dev/null +++ b/backend/tests/unit/graphql/test_mutation_upsert.py @@ -0,0 +1,207 @@ +from uuid import uuid4 + +import pytest +from graphql import graphql + +from infrahub.core.branch import Branch +from infrahub.core.manager import NodeManager +from infrahub.core.node import Node +from infrahub.database import InfrahubDatabase +from infrahub.graphql import generate_graphql_schema + + +@pytest.fixture(autouse=True) +def load_graphql_requirements(group_graphql): + pass + + +async def test_upsert_existing_simple_object_by_id(db: InfrahubDatabase, person_john_main: Node, branch: Branch): + query = ( + """ + mutation { + TestPersonUpsert(data: {id: "%s", name: { value: "Jim"}}) { + ok + } + } + """ + % person_john_main.id + ) + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert result.data["TestPersonUpsert"]["ok"] is True + + obj1 = await NodeManager.get_one(db=db, id=person_john_main.id, branch=branch) + assert obj1.name.value == "Jim" + assert obj1.height.value == 180 + + +async def test_upsert_existing_simple_object_by_default_filter( + db: InfrahubDatabase, person_john_main: Node, branch: Branch +): + query = """ + mutation { + TestPersonUpsert(data: {name: { value: "John"}, height: {value: 138}}) { + ok + } + } + """ + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert result.data["TestPersonUpsert"]["ok"] is True + + obj1 = await NodeManager.get_one(db=db, id=person_john_main.id, branch=branch) + assert obj1.name.value == "John" + assert obj1.height.value == 138 + + +async def test_upsert_create_simple_object_no_id(db: 
InfrahubDatabase, person_john_main, branch: Branch): + query = """ + mutation { + TestPersonUpsert(data: {name: { value: "%s"}, height: {value: %s}}) { + ok + object { + id + } + } + } + """ % ("Ellen Ripley", 179) + + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert result.data["TestPersonUpsert"]["ok"] is True + + person_id = result.data["TestPersonUpsert"]["object"]["id"] + obj1 = await NodeManager.get_one(db=db, id=person_id, branch=branch) + assert obj1.name.value == "Ellen Ripley" + assert obj1.height.value == 179 + + +async def test_upsert_create_simple_object_with_id(db: InfrahubDatabase, person_john_main, branch: Branch): + fresh_id = str(uuid4()) + query = """ + mutation { + TestPersonUpsert(data: {id: "%s", name: { value: "%s"}, height: {value: %s}}) { + ok + object { + id + } + } + } + """ % (fresh_id, "Dwayne Hicks", 168) + + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + assert result.errors is None + assert result.data["TestPersonUpsert"]["ok"] is True + + person_id = result.data["TestPersonUpsert"]["object"]["id"] + assert person_id == fresh_id + obj1 = await NodeManager.get_one(db=db, id=person_id, branch=branch) + assert obj1.name.value == "Dwayne Hicks" + assert obj1.height.value == 168 + + +async def test_cannot_upsert_new_object_without_required_fields(db: InfrahubDatabase, person_john_main, branch: Branch): + fresh_id = str(uuid4()) + query = ( + """ + mutation { + TestPersonUpsert(data: {id: "%s", height: { value: 182}}) { + ok + } + } + """ + % fresh_id + ) + + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + expected_error = "Field 'TestPersonCreateInput.name' of required type 'TextAttributeInput!' was not provided." 
+ assert any([expected_error in error.message for error in result.errors]) + + assert await NodeManager.get_one(db=db, id=fresh_id, branch=branch) is None + + +async def test_id_for_other_schema_raises_error( + db: InfrahubDatabase, person_john_main, car_accord_main, branch: Branch +): + query = ( + """ + mutation { + TestPersonUpsert(data: {id: "%s", name: {value: "John"}, height: { value: 182}}) { + ok + } + } + """ + % car_accord_main.id + ) + + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + expected_error = f"Node with id {car_accord_main.id} exists, but it is a TestCar, not TestPerson" + assert any([expected_error in error.message for error in result.errors]) + + +async def test_update_by_id_to_nonunique_value_raises_error( + db: InfrahubDatabase, person_john_main, person_jim_main, branch: Branch +): + query = ( + """ + mutation { + TestPersonUpsert(data: {id: "%s", name: {value: "Jim"}}) { + ok + } + } + """ + % person_john_main.id + ) + + result = await graphql( + schema=await generate_graphql_schema(db=db, include_subscription=False, branch=branch), + source=query, + context_value={"infrahub_database": db, "infrahub_branch": branch}, + root_value=None, + variable_values={}, + ) + + expected_error = "An object already exist with this value: name: Jim at name" + assert any([expected_error in error.message for error in result.errors]) diff --git a/backend/tests/unit/graphql/test_query_analyzer.py b/backend/tests/unit/graphql/test_query_analyzer.py index 3d51299dc8..292868b10f 100644 --- a/backend/tests/unit/graphql/test_query_analyzer.py +++ b/backend/tests/unit/graphql/test_query_analyzer.py @@ -5,7 +5,7 @@ from infrahub.core.branch import Branch from infrahub.database import InfrahubDatabase from infrahub.graphql import generate_graphql_schema -from infrahub.graphql.analyzer import GraphQLQueryAnalyzer +from infrahub.graphql.analyzer import GraphQLOperation, GraphQLQueryAnalyzer async def test_analyzer_init_query_only(query_01, bad_query_01): @@ -34,12 +34,17 @@ async def test_nbr_queries(query_01: str, query_03: str): assert gqa.nbr_queries == 2 -async def test_query_types(query_01: str, query_03: str): +async def test_query_types(query_01: str, query_03: str, query_introspection: str): gqa = GraphQLQueryAnalyzer(query=query_01) - assert gqa.operations == {OperationType.QUERY} + assert gqa.operations == [GraphQLOperation(name="TestPerson", operation_type=OperationType.QUERY)] gqa = GraphQLQueryAnalyzer(query=query_03) - assert gqa.operations == {OperationType.QUERY, OperationType.MUTATION} + assert len(gqa.operations) == 2 + assert GraphQLOperation(name="TestPerson", operation_type=OperationType.QUERY) in gqa.operations + assert GraphQLOperation(name="TestPersonCreate", operation_type=OperationType.MUTATION) in gqa.operations + + gqa = GraphQLQueryAnalyzer(query=query_introspection) + assert gqa.operations == [GraphQLOperation(name="__schema", operation_type=OperationType.QUERY)] async def test_is_valid_simple_schema( @@ -49,6 +54,7 @@ async def test_is_valid_simple_schema( query_02: str, query_03: str, query_04: str, + query_introspection: str, car_person_schema_generics, ): schema = await generate_graphql_schema(db=db, include_subscription=False, branch=default_branch) @@ -73,6 +79,11 @@ async def test_is_valid_simple_schema( assert errors is None assert is_valid is True + gqa = 
GraphQLQueryAnalyzer(query=query_introspection, schema=schema, branch=default_branch)
+    is_valid, errors = gqa.is_valid
+    assert errors is None
+    assert is_valid is True
+
 
 async def test_is_valid_core_schema(
     db: InfrahubDatabase,
diff --git a/backend/tests/unit/message_bus/operations/event/__init__.py b/backend/tests/unit/message_bus/operations/event/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/backend/tests/unit/message_bus/operations/event/test_branch.py b/backend/tests/unit/message_bus/operations/event/test_branch.py
new file mode 100644
index 0000000000..35d24d2ea6
--- /dev/null
+++ b/backend/tests/unit/message_bus/operations/event/test_branch.py
@@ -0,0 +1,22 @@
+from infrahub.message_bus import messages
+from infrahub.message_bus.operations.event.branch import delete
+from infrahub.services import InfrahubServices
+
+
+async def test_delete(helper):
+    """Validate that a deleted branch triggers a registry refresh and cancels open proposed changes"""
+
+    message = messages.EventBranchDelete(
+        branch_id="40fb612f-eaaa-422b-9480-df269080c103", branch="cr1234", data_only=False
+    )
+
+    recorder = helper.get_message_bus_recorder()
+    service = InfrahubServices(message_bus=recorder)
+
+    await delete(message=message, service=service)
+
+    assert len(recorder.messages) == 2
+    assert isinstance(recorder.messages[0], messages.RefreshRegistryBranches)
+    assert isinstance(recorder.messages[1], messages.TriggerProposedChangeCancel)
+    trigger_cancel: messages.TriggerProposedChangeCancel = recorder.messages[1]
+    assert trigger_cancel.branch == "cr1234"
diff --git a/backend/tests/unit/message_bus/operations/requests/test_proposed_change.py b/backend/tests/unit/message_bus/operations/requests/test_proposed_change.py
index 7925d974cb..33debaf37a 100644
--- a/backend/tests/unit/message_bus/operations/requests/test_proposed_change.py
+++ b/backend/tests/unit/message_bus/operations/requests/test_proposed_change.py
@@ -27,8 +27,8 @@ async def test_repository_checks(helper):
     service = InfrahubServices(client=client, message_bus=bus_recorder)
     message = messages.RequestProposedChangeRepositoryChecks(proposed_change=proposed_change_id)
     await repository_checks(message=message, service=service)
-    assert len(bus_recorder.messages) == 2
-    assert ["request.repository.checks"] == bus_recorder.seen_routing_keys
+    assert len(bus_recorder.messages) == 4
+    assert ["request.repository.checks", "request.repository.user_checks"] == bus_recorder.seen_routing_keys
     assert (
         messages.RequestRepositoryChecks(
             meta=Meta(request_id=""),
@@ -49,3 +49,10 @@ async def test_repository_checks(helper):
         )
         in bus_recorder.messages
     )
+    assert messages.RequestRepositoryUserChecks(
+        meta=Meta(request_id=""),
+        proposed_change="1790fa8f-dd4d-ed00-58dd-18835e51189a",
+        repository="1790fa6d-1654-9068-58df-1883e684d3fd",
+        source_branch="test-pc-1",
+        target_branch="main",
+    ) in bus_recorder.messages
diff --git a/backend/tests/unit/services/test_scheduler.py b/backend/tests/unit/services/test_scheduler.py
new file mode 100644
index 0000000000..8c198049dd
--- /dev/null
+++ b/backend/tests/unit/services/test_scheduler.py
@@ -0,0 +1,85 @@
+from typing import Any, List, Optional
+
+from infrahub.services import InfrahubServices
+from infrahub.services.scheduler import InfrahubScheduler, Schedule, run_schedule
+
+
+class FakeLogger:
+    def __init__(self) -> None:
+        self.info_logs: List[Optional[str]] = []
+        self.error_logs: List[Optional[str]] = []
+
+    def debug(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any:
+        """Send a debug 
event""" + + def info(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + self.info_logs.append(event) + + def warning(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send a warning event""" + + def error(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send an error event.""" + self.error_logs.append(event) + + def critical(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send a critical event.""" + + def exception(self, event: Optional[str] = None, *args: Any, **kw: Any) -> Any: + """Send an exception event.""" + + +async def nothing_to_see(service: InfrahubServices) -> None: + service.scheduler.running = False + raise NotImplementedError("This function has not been implemented") + + +async def log_once_and_stop(service: InfrahubServices) -> None: + service.log.info("Writing entry to the log") + if len(service.log.info_logs) == 3: + service.scheduler.running = False + + +async def test_scheduler_return_on_not_running(): + """The scheduler should return without writing entries to the log if it is not running.""" + fake_log = FakeLogger() + schedule_manager = InfrahubScheduler() + schedule_manager.running = False + service = InfrahubServices(log=fake_log) + service.scheduler = schedule_manager + schedule = Schedule(name="inactive", interval=10, start_delay=1, function=log_once_and_stop) + await run_schedule(schedule=schedule, service=service) + + assert len(fake_log.info_logs) == 0 + + +async def test_scheduler_exit_after_first(): + """The scheduler should return without writing entries to the log if it is not running.""" + fake_log = FakeLogger() + schedule_manager = InfrahubScheduler() + schedule_manager.running = True + service = InfrahubServices(log=fake_log) + service.scheduler = schedule_manager + schedule = Schedule(name="inactive", interval=1, start_delay=1, function=log_once_and_stop) + await run_schedule(schedule=schedule, service=service) + + assert len(fake_log.info_logs) == 3 + assert fake_log.info_logs[0] == "Started recurring task" + assert fake_log.info_logs[1] == "Writing entry to the log" + assert fake_log.info_logs[2] == "Writing entry to the log" + + +async def test_scheduler_task_with_error(): + """The scheduler should return without writing entries to the log if it is not running.""" + fake_log = FakeLogger() + schedule_manager = InfrahubScheduler() + schedule_manager.running = True + service = InfrahubServices(log=fake_log) + service.scheduler = schedule_manager + schedule = Schedule(name="inactive", interval=1, start_delay=0, function=nothing_to_see) + await run_schedule(schedule=schedule, service=service) + + assert len(fake_log.info_logs) == 1 + assert len(fake_log.error_logs) == 1 + assert fake_log.info_logs[0] == "Started recurring task" + assert fake_log.error_logs[0] == "This function has not been implemented" diff --git a/backend/tests/unit/storage/test_local_storage.py b/backend/tests/unit/storage/test_local_storage.py index 71df91b8b2..1ba1eeca5b 100644 --- a/backend/tests/unit/storage/test_local_storage.py +++ b/backend/tests/unit/storage/test_local_storage.py @@ -1,65 +1,44 @@ import os from pathlib import Path +import fastapi_storages import pytest from infrahub_sdk import UUIDT +from infrahub import config from infrahub.exceptions import NodeNotFound -from infrahub.storage.local import InfrahubLocalStorage, LocalStorageSettings +from infrahub.storage import InfrahubObjectStorage -class set_directory(object): - """Sets the cwd within the context +async def 
test_init_local(helper, local_storage_dir: str, file1_in_storage: str): + storage = await InfrahubObjectStorage.init(settings=config.SETTINGS.storage) + assert isinstance(storage._storage, fastapi_storages.FileSystemStorage) + assert config.SETTINGS.storage.local.path_ == local_storage_dir - Args: - path (Path): The path to the cwd - """ - def __init__(self, path: Path): - self.path = path - self.origin = Path().absolute() - - def __enter__(self): - os.chdir(self.path) - - def __exit__(self, *args, **kwargs): - os.chdir(self.origin) - - -async def test_init(helper, local_storage_dir: str, file1_in_storage: str): - storage = await InfrahubLocalStorage.init(settings={"directory": local_storage_dir}) - assert isinstance(storage.settings, LocalStorageSettings) - - with set_directory(Path(os.path.dirname(local_storage_dir))): - storage = await InfrahubLocalStorage.init() - assert isinstance(storage.settings, LocalStorageSettings) - assert storage.directory_root == local_storage_dir - - -async def test_generate_path(helper, local_storage_dir: str, file1_in_storage: str): - storage = await InfrahubLocalStorage.init(settings={"directory": local_storage_dir}) - - assert storage.generate_path(identifier="test1") == f"{local_storage_dir}/test1" +async def test_init_s3(helper, s3_storage_bucket: str): + storage = await InfrahubObjectStorage.init(settings=config.SETTINGS.storage) + assert isinstance(storage._storage, fastapi_storages.InfrahubS3ObjectStorage) + assert config.SETTINGS.storage.s3.endpoint_url == "storage.googleapis.com" async def test_retrieve_file(helper, local_storage_dir: str, file1_in_storage: str): - storage = await InfrahubLocalStorage.init(settings={"directory": local_storage_dir}) - - file1 = await storage.retrieve(identifier=file1_in_storage) + storage = await InfrahubObjectStorage.init(settings=config.SETTINGS.storage) + file1 = storage.retrieve(identifier=file1_in_storage) assert file1 async def test_retrieve_file_does_not_exist(helper, local_storage_dir: str): - storage = await InfrahubLocalStorage.init(settings={"directory": local_storage_dir}) + storage = await InfrahubObjectStorage.init(settings=config.SETTINGS.storage) with pytest.raises(NodeNotFound): - await storage.retrieve(identifier="doesnotexist") + storage.retrieve(identifier="doesnotexist") async def test_store_file( helper, local_storage_dir: str, ): - storage = await InfrahubLocalStorage.init(settings={"directory": local_storage_dir}) + storage = await InfrahubObjectStorage.init(settings=config.SETTINGS.storage) fixture_dir = helper.get_fixtures_dir() files_dir = os.path.join(fixture_dir, "schemas") @@ -67,7 +46,7 @@ async def test_store_file( content_file1 = Path(os.path.join(files_dir, filenames[0])).read_bytes() identifier = str(UUIDT()) - await storage.store(identifier=identifier, content=content_file1) + storage.store(identifier=identifier, content=content_file1) file1 = Path(os.path.join(local_storage_dir, identifier)) assert file1.exists() diff --git a/development/Dockerfile b/development/Dockerfile index 618f470f4e..b2f7279848 100644 --- a/development/Dockerfile +++ b/development/Dockerfile @@ -6,19 +6,19 @@ FROM docker.io/python:${PYTHON_VER} AS base ENV PYTHONUNBUFFERED 1 -RUN mkdir /prom_shared -RUN mkdir /remote +ENV PATH="${PATH}:/root/.local/bin" \ + PROMETHEUS_MULTIPROC_DIR="/prom_shared" \ + INFRAHUB_FRONTEND_DIRECTORY="/opt/infrahub/frontend" + +RUN mkdir /prom_shared /remote RUN apt-get update && \ apt-get upgrade -y && \ - apt-get install --no-install-recommends -y pkg-config build-essential && 
\
-    apt-get autoremove -y && \
-    apt-get clean all && \
+    apt-get install --no-install-recommends -y curl git pkg-config build-essential ca-certificates && \
+    curl -sSL https://install.python-poetry.org | python3 - && \
     rm -rf /var/lib/apt/lists/* && \
-    pip --no-cache-dir install --upgrade pip wheel
-
-RUN curl -sSL https://install.python-poetry.org | python3 -
-ENV PATH="${PATH}:/root/.local/bin"
+    pip --no-cache-dir install --no-compile --upgrade pip wheel
 
 RUN poetry config virtualenvs.create false
 
@@ -36,7 +36,7 @@ WORKDIR /source
 RUN npm install --omit=dev
 
 COPY frontend/ /source/
-RUN npm run build
+RUN npm run build && npm cache clean --force
 
 # ****************************************************************
 # STAGE : Backend
@@ -46,22 +46,19 @@ FROM base AS backend
 # --------------------------------------------
 # Configure Git & Environment
 # --------------------------------------------
-RUN git config --global user.name "Infrahub"
-RUN git config --global user.email "infrahub@opsmill.com"
-RUN git config --global --add safe.directory '*'
-RUN git config --global credential.usehttppath true
-RUN git config --global credential.helper /usr/local/bin/infrahub-git-credential
+RUN git config --global user.name "Infrahub" && \
+    git config --global user.email "infrahub@opsmill.com" && \
+    git config --global --add safe.directory '*' && \
+    git config --global credential.usehttppath true && \
+    git config --global credential.helper /usr/local/bin/infrahub-git-credential
 
-RUN mkdir -p /opt/infrahub/git
-RUN mkdir -p /opt/infrahub/storage
-RUN mkdir -p /opt/infrahub/source
+RUN mkdir -p /opt/infrahub/git /opt/infrahub/storage /opt/infrahub/source /opt/infrahub/frontend/dist
 
 WORKDIR /source
 
 # --------------------------------------------
 # Import Frontend Build
 # --------------------------------------------
-RUN mkdir -p /opt/infrahub/frontend/dist
 COPY --from=frontend /source/dist/ /opt/infrahub/frontend/dist
 
 # --------------------------------------------
@@ -74,14 +71,21 @@ RUN poetry install --no-interaction --no-ansi --no-root --no-directory
 # --------------------------------------------
 # Copy in the rest of the source code and install the project
 # --------------------------------------------
-COPY . /source
+COPY . 
./ RUN poetry install --no-interaction --no-ansi +# -------------------------------------------- +# Purge & Cleanup +# -------------------------------------------- +RUN apt-get autoremove -y && \ + apt-get clean all && \ + rm -rf /var/lib/apt/lists/* + # **************************************************************** # STAGE : Gitpod # **************************************************************** -FROM backend as gitpod +FROM backend AS gitpod # -------------------------------------------- # Create new user and assign the right permissions diff --git a/development/docker-compose-test.yml b/development/docker-compose-test.yml index 4ef2751835..89b1d33d98 100644 --- a/development/docker-compose-test.yml +++ b/development/docker-compose-test.yml @@ -14,7 +14,6 @@ services: - "INFRAHUB_CONFIG=/source/development/infrahub.toml" - "INFRAHUB_PRODUCTION=false" - "INFRAHUB_LOG_LEVEL=CRITICAL" - - "PROMETHEUS_MULTIPROC_DIR=/prom_shared" - "INFRAHUB_TEST_IN_DOCKER=1" - "INFRAHUB_DB_TYPE=${INFRAHUB_DB_TYPE}" volumes: diff --git a/development/docker-compose.yml b/development/docker-compose.yml index aff7ab697f..743b33bb7a 100644 --- a/development/docker-compose.yml +++ b/development/docker-compose.yml @@ -25,15 +25,13 @@ services: - "INFRAHUB_SECURITY_INITIAL_ADMIN_TOKEN=06438eb2-8019-4776-878c-0941b1f1d1ec" - "INFRAHUB_SECURITY_SECRET_KEY=327f747f-efac-42be-9e73-999f08f86b92" - "INFRAHUB_ALLOW_ANONYMOUS_ACCESS=true" - - "PROMETHEUS_MULTIPROC_DIR=/prom_shared" - - "INFRAHUB_FRONTEND_DIRECTORY=/opt/infrahub/frontend" - "INFRAHUB_DB_TYPE=${INFRAHUB_DB_TYPE}" volumes: - ../:/source - "storage_data:/opt/infrahub/storage" tty: true healthcheck: - test: wget -O /dev/null http://localhost:8000/api/schema || exit 1 + test: wget -O /dev/null http://localhost:8000/api/schema/summary || exit 1 interval: 5s timeout: 5s retries: 20 diff --git a/development/infrahub.toml b/development/infrahub.toml index 62a1738382..7aa6b1121f 100644 --- a/development/infrahub.toml +++ b/development/infrahub.toml @@ -19,11 +19,11 @@ password = "infrahub" [cache] address = "cache" -[api] -cors_allow_origins = ["*"] +[storage] +driver = "local" -[storage.settings] -directory = "/opt/infrahub/storage" +[storage.local] +path = "/opt/infrahub/storage" [trace] enable = false diff --git a/docs/_templates/schema/attribute.j2 b/docs/_templates/schema/attribute.j2 new file mode 100644 index 0000000000..722a3f1f21 --- /dev/null +++ b/docs/_templates/schema/attribute.j2 @@ -0,0 +1,89 @@ +--- +label: Attribute +layout: default +order: 800 +--- + +{% macro attribute_constraints(attr) -%} +{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex and (attr.min_length or attr.max_length) %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.max_length | default("-")}}{% endif %} +{%- endmacro %} +{% macro bool_to_str(value) -%} +{% if value == true %}True{% else %}False{% endif %} +{%- endmacro %} +{% macro reverse_bool_to_str(value) -%} +{% if value == true %}False{% else %}True{% endif %} +{%- endmacro %} +{% set ns = namespace(node=none) -%} +{% for node in schema.nodes -%} +{% if node.name == "Attribute" -%} +{% set ns.node = node -%} +{% endif -%} +{% endfor -%} +# Attribute + +In a schema file, an attribute can be defined inside a `node`, a `generic` or a `node extension`. + +## Summary + +Below is the list of all available options to define an Attribute in the schema + +| Name | Type | Description | Mandatory | { class="compact" } +| ---- | ---- | ---- | --------- | +{%- for attr in ns.node.attributes | sort(attribute='name')%} +{%- if attr.name not in ["inherited"] %} +| [**{{ attr.name }}**](#{{ attr.name }}) | Attribute | {{ attr.description }} | {{ reverse_bool_to_str(attr.optional) }} | +{%- endif %} +{%- endfor %} + +## Example + +```yaml +nodes: + - name: Rack + attributes: + - name: name + kind: Text + unique: True + description: Unique identifier for the rack +extensions: + nodes: + - kind: CoreProposedChange + attribute: + - name: ticket_id + kind: Text + unique: True + optional: False + description: Internal Ticket ID from Service Now +``` + +## Reference Guide +{% for attr in ns.node.attributes | sort(attribute='name') -%} +{% if attr.name not in ["inherited"] %} +### {{ attr.name }} + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | {{ attr.name }} | +| **Kind** | `{{ attr.kind }}` | +| **Description** | {{ attr.description }} | +| **Optional** | {{ bool_to_str(attr.optional) }} | +| **Default Value** | {{ attr.default_value }} | +| **Constraints** | {{attribute_constraints(attr)}} | +{% if attr.enum -%} +| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | +{%- endif %} + +{%- endif %} +{% endfor %} +{% for rel in ns.node.relationships | sort(attribute='name') -%} +{% if rel.name not in ["node"] %} +### {{ rel.name }} + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | {{ rel.name }} | +| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | +| **Description** | {{ rel.description }} | + +{%- endif %} +{% endfor %} diff --git a/docs/_templates/schema/generic.j2 b/docs/_templates/schema/generic.j2 new file mode 100644 index 0000000000..28f21e0108 --- /dev/null +++ b/docs/_templates/schema/generic.j2 @@ -0,0 +1,65 @@ +--- +label: Generic +layout: default +order: 600 +--- + +{% macro attribute_constraints(attr) -%} +{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex and (attr.min_length or attr.max_length) %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.max_length | default("-")}}{% endif %} +{%- endmacro %} +{% macro bool_to_str(value) -%} +{% if value == true %}True{% else %}False{% endif %} +{%- endmacro %} +{% macro reverse_bool_to_str(value) -%} +{% if value == true %}False{% else %}True{% endif %} +{%- endmacro %} +{% set ns = namespace(node=none) -%} +{% for node in schema.nodes -%} +{% if node.name == "Generic" -%} +{% set ns.node = node -%} +{% endif -%} +{% endfor -%} +# Generic + +## Summary + +Below is the list of all available options to define a Generic in the schema + +| Name | Type | Description | Mandatory | { class="compact" } +| ---- | ---- | ---- | --------- | +{%- for attr in ns.node.attributes | sort(attribute='name') %} +{%- if attr.name not in ["inherited"] %} +| [**{{ attr.name }}**](#{{ attr.name }}) | Attribute | {{ attr.description }} | {{ reverse_bool_to_str(attr.optional) }} | +{%- endif %} +{%- endfor %} +{%- for rel in ns.node.relationships %} +| [**{{ rel.name }}**](#{{ rel.name }}) | Relationship | {{ rel.description }} | {{ reverse_bool_to_str(rel.optional) }} | +{%- endfor %} + +## Reference Guide +{% for attr in ns.node.attributes | sort(attribute='name') -%} +### {{ attr.name }} + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | {{ attr.name }} | +| **Kind** | `{{ attr.kind }}` | +| **Description** | {{ attr.description }} | +| **Optional** | {{ bool_to_str(attr.optional) }} | +| **Default Value** | {{ attr.default_value }} | +| **Constraints** | {{attribute_constraints(attr)}} | +{% if attr.enum -%} +| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | +{%- endif %} + +{% endfor %} +{% for rel in ns.node.relationships | sort(attribute='name') -%} +## {{ rel.name }} + +| -- | -- | { class="compact" } +| ---- | --------------- | +| **Name** | {{ rel.name }} | +| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | +| **Description** | {{ rel.description }} | + +{% endfor %} \ No newline at end of file diff --git a/docs/_templates/schema/node.j2 b/docs/_templates/schema/node.j2 new file mode 100644 index 0000000000..59e533e673 --- /dev/null +++ b/docs/_templates/schema/node.j2 @@ -0,0 +1,65 @@ +--- +label: Node +layout: default +order: 900 +--- + +{% macro attribute_constraints(attr) -%} +{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex and (attr.min_length or attr.max_length) %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.max_length | default("-")}}{% endif %} +{%- endmacro %} +{% macro bool_to_str(value) -%} +{% if value == true %}True{% else %}False{% endif %} +{%- endmacro %} +{% macro reverse_bool_to_str(value) -%} +{% if value == true %}False{% else %}True{% endif %} +{%- endmacro %} +{% set ns = namespace(node=none) -%} +{% for node in schema.nodes -%} +{% if node.name == "Node" -%} +{% set ns.node = node -%} +{% endif -%} +{% endfor -%} +# Node + +## Summary + +Below is the list of all available options to define a Node in the schema + +| Name | Type | Description | Mandatory | { class="compact" } +| ---- | ---- | ----------- | --------- | +{%- for attr in ns.node.attributes | sort(attribute='name') %} +{%- if attr.name not in ["inherited"] %} +| [**{{ attr.name }}**](#{{ attr.name }}) | Attribute | {{ attr.description }} | {{ reverse_bool_to_str(attr.optional) }} | +{%- endif %} +{%- endfor %} +{%- for rel in ns.node.relationships | sort(attribute='name') %} +| [**{{ rel.name }}**](#{{ rel.name }}) | Relationship | {{ rel.description }} | {{ reverse_bool_to_str(rel.optional) }} | +{%- endfor %} + +## Reference Guide +{% for attr in ns.node.attributes | sort(attribute='name') -%} +### {{ attr.name }} + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | {{ attr.name }} | +| **Kind** | `{{ attr.kind }}` | +| **Description** | {{ attr.description }} | +| **Optional** | {{ bool_to_str(attr.optional) }} | +| **Default Value** | {{ attr.default_value }} | +| **Constraints** | {{attribute_constraints(attr)}} | +{% if attr.enum -%} +| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | +{%- endif %} + +{% endfor %} +{% for rel in ns.node.relationships | sort(attribute='name') -%} +## {{ rel.name }} + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | {{ rel.name }} | +| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | +| **Description** | {{ rel.description }} | + +{% endfor %} diff --git a/docs/_templates/schema/relationship.j2 b/docs/_templates/schema/relationship.j2 new file mode 100644 index 0000000000..7a0959e8f9 --- /dev/null +++ b/docs/_templates/schema/relationship.j2 @@ -0,0 +1,69 @@ +--- +label: Relationship +layout: default +order: 700 +--- + +{% macro attribute_constraints(attr) -%} +{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex and (attr.min_length or attr.max_length) %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.max_length | default("-")}}{% endif %}
+{%- endmacro %}
+{% macro bool_to_str(value) -%}
+{% if value == true %}True{% else %}False{% endif %}
+{%- endmacro %}
+{% macro reverse_bool_to_str(value) -%}
+{% if value == true %}False{% else %}True{% endif %}
+{%- endmacro %}
+{% set ns = namespace(node=none) -%}
+{% for node in schema.nodes -%}
+{% if node.name == "Relationship" -%}
+{% set ns.node = node -%}
+{% endif -%}
+{% endfor -%}
+
+# Relationship
+
+In a schema file, a relationship can be defined inside a `node`, a `generic` or a `node extension`.
+
+## Summary
+
+Below is the list of all available options to define a Relationship in the schema
+
+
+
+| Name | Type | Description | Mandatory | { class="compact" }
+| ---- | ---- | ---- | --------- |
+{%- for attr in ns.node.attributes | sort(attribute='name') %}
+{%- if attr.name not in ["inherited"] %}
+| [**{{ attr.name }}**](#{{ attr.name }}) | Attribute | {{ attr.description }} | {{ reverse_bool_to_str(attr.optional) }} |
+{%- endif %}
+{%- endfor %}
+
+## Reference Guide
+{% for attr in ns.node.attributes | sort(attribute='name') -%}
+### {{ attr.name }}
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | {{ attr.name }} |
+| **Kind** | `{{ attr.kind }}` |
+| **Description** | {{ attr.description }} |
+| **Optional** | {{ bool_to_str(attr.optional) }} |
+| **Default Value** | {{ attr.default_value }} |
+| **Constraints** | {{attribute_constraints(attr)}} |
+{% if attr.enum -%}
+| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} |
+{%- endif %}
+
+{% endfor %}
+{% for rel in ns.node.relationships | sort(attribute='name') -%}
+{% if rel.name not in ["node"] %}
+## {{ rel.name }}
+
+| -- | -- | { class="compact" }
+| ---- | --------------- |
+| **Name** | {{ rel.name }} |
+| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} |
+| **Description** | {{ rel.description }} |
+
+{%- endif %}
+{% endfor %}
\ No newline at end of file
diff --git a/docs/components/index.yml b/docs/components/index.yml
deleted file mode 100644
index 56c4ede093..0000000000
--- a/docs/components/index.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-label: Components
-icon: "package"
-order: 700
diff --git a/docs/development/docs.md b/docs/development/docs.md
new file mode 100644
index 0000000000..7845e2f817
--- /dev/null
+++ b/docs/development/docs.md
@@ -0,0 +1,289 @@
+---
+title: Documentation guide
+icon: pencil
+---
+
+# Documentation guide
+
+Welcome to the Infrahub documentation guide. This document aims to answer any questions that may come up when creating or updating documentation.
+
+## Working with the docs site locally
+
+The recommended way to run and build the docs locally is with Infrahub's suite of `invoke`-driven tasks. To run the commands, you should:
+
+1. Install [`python`](https://www.python.org/).
+2. Install [`invoke`](https://www.pyinvoke.org/).
+3. Install [`poetry`](https://python-poetry.org/).
+4. Run `poetry install` from the Infrahub project directory.
+5. Install [Node.js (and NPM)](https://nodejs.org/en).
+
+Once these requirements are met, you'll have access to the doc task list.
+
+```sh
+invoke -l docs
+```
+
+The primary commands are as follows:
+
+- `invoke docs.build`: Build the documentation website.
+- `invoke docs.generate`: Generate source-derived documentation pages (such as the schema and Jinja templates).
+- `invoke docs.serve`: Run a local development server for the documentation website.
+- `invoke docs.validate`: Validate that the generated documentation has been committed to git.
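+
+A typical editing loop, as a sketch (the exact task output depends on your checkout), is to regenerate any derived pages and then preview the site while editing:
+
+```sh
+invoke docs.generate  # refresh the source-derived pages
+invoke docs.serve     # run the local development server
+```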
+
+## Linting and automation
+
+Infrahub uses [Vale](https://vale.sh) to check grammar, style, and word usage. You can find Vale's configuration in `.vale.ini`, and the Infrahub styles in `.vale/styles/Infrahub`.
+
+[Markdownlint](https://github.com/DavidAnson/markdownlint) is used to encourage consistent markdown files, and Infrahub's configuration is located at `.markdownlint.yaml`.
+
+Most Vale warnings match up with the [style guide](#style-guide) explanations below. Other warnings often fall into the `Infrahub.spelling` rule. These are caused by misspellings, product names, names of people, or otherwise unknown technical terms. See the [procedures for updating rules](#spelling-errors) below for details on adding terms to the approved list.
+
+### Install linters locally
+
+The preferred way to work on the documentation with Vale and markdownlint is directly in an editor. Install the [Vale](https://marketplace.visualstudio.com/items?itemName=chrischinchilla.vale-vscode) and [markdownlint](https://marketplace.visualstudio.com/items?itemName=DavidAnson.vscode-markdownlint) VS Code plugins to enable in-editor warnings.
+
+### Disabling Vale and markdownlint
+
+You can disable Vale and markdownlint in-line with the following markdown comments:
+
+```md
+<!-- vale off -->
+
+Ignored Specialized Phrase ignored by vale
+
+<!-- vale on -->
+
+<!-- markdownlint-disable -->
+
+## Ignored markdown line
+
+<!-- markdownlint-restore -->
+```
+
+This is useful in situations where specific style choices or markdown quirks force the use of an otherwise conflicting rule. In general, it is better to update existing configurations or create new rules rather than disable scanning of individual files.
+
+### Creating new Vale rules
+
+For questions regarding how to add to or update an existing rule, see the [Vale styles documentation](https://vale.sh/docs/topics/styles/). A wealth of examples is also available in [GitLab's vale configuration](https://gitlab.com/gitlab-org/gitlab/-/tree/master/doc/.vale/gitlab).
+
+#### Spelling errors
+
+If Vale warns of a spelling mistake and the word is valid, you can fix it by updating the `spelling-exceptions.txt` file in the `.vale/styles/` directory. When adding a new term, update and alphabetize the list to make future scanning easier.
+
+#### Common replacement words
+
+Add common shorthand words and phrases that have better alternatives to the `swap.yml` rule. For example, `repo` becomes `repository`.
+
+Add special case capitalization words to the `branded-terms-case-swap.yml` rule. For example, `Github` becomes `GitHub`.
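+
+As a sketch, a substitution entry in a Vale style maps each shorthand to its replacement. The `message` and `level` shown here are illustrative, not necessarily what Infrahub's `swap.yml` uses:
+
+```yaml
+extends: substitution
+message: "Use '%s' instead of '%s'."
+level: warning
+ignorecase: true
+swap:
+  repo: repository
+```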
+
+## Writing markdown
+
+Pages are written in markdown or generated by the app source. For markdown pages, Retype supports [most standard markdown syntax](https://retype.com/guides/formatting/).
+
+In addition, Retype has its own [markdown-inspired components](https://retype.com/components/). You'll often find reference links, panels, and snippets used throughout the Infrahub docs.
+
+### Markdown tips
+
+#### Ensure proper newlines
+
+Use two full returns between paragraphs (one empty line). This ensures a new paragraph is created.
+
+#### Ensure proper h1 tags
+
+Many pages include a *greymatter* or *metadata* block at the top, denoted by two sets of `---`. This is used by Retype to [configure the page](https://retype.com/faq/#what-is-page-metadata). If using the `title` attribute, also use a top-level heading `#` on the page. They do not need to be the same. If `title` is omitted, Retype will use the top-level heading (h1).
+
+## Organizing new pages
+
+We organize all documentation into **four** categories: tutorials, guides, topics, and reference. This is heavily influenced by the [Diátaxis framework](https://diataxis.fr/). The goal is to maintain a more organized, understandable set of docs.
+
+Here are questions to ask when deciding where to place a new document:
+
+- Are you walking the user through a scenario? Select **Tutorials**.
+- Are you providing steps to complete a specific task? Select **Guides**.
+- Are you providing background information, explanation, or abstract concepts? Select **Topics**.
+- Are you providing APIs, command references, or concise reference information? Select **Reference**.
+
+If you're unsure where something goes, diátaxis offers a [map](https://diataxis.fr/needs/) and [compass](https://diataxis.fr/compass/) to help.
+
+### Tutorials
+
+Tutorials are an opportunity to guide users through a repeatable process. The purpose is to **provide basic competence** in Infrahub or a feature-set.
+
+They should:
+
+- Introduce the user to the end goal.
+- Be repeatable by any user.
+- Describe practical steps, rather than abstract concepts.
+- Provide immediate results.
+
+The "Getting Started" tutorial is a good example, as it walks the user through a scripted scenario in a demo environment.
+
+For a deeper dive into tutorials, refer to the [diátaxis tutorials page](https://diataxis.fr/tutorials/).
+
+>Tutorials are complex learning endeavors. Before deciding if a tutorial is necessary, consider how you might update an existing tutorial or if a guide would be a better option.
+
+### Guides
+
+Guides may seem like tutorials, but they are a shorter set of universal instructions that can apply to any user's task. The purpose is to **teach how to perform a specific task**.
+
+**Naming guideline:** Name the task the guide covers, preferably in 2-5 words.
+
+For example:
+
+- Installing Infrahub
+- Creating new devices
+- How to invite collaborators
+
+For a deeper dive into guides, refer to the [diátaxis guides page](https://diataxis.fr/how-to-guides/).
+
+### Topics
+
+Sometimes called *explanations*, topics offer additional context and rationale about the workings of Infrahub. They should answer the question: "how does X work?"
+
+**Naming guideline:** Use the topic name itself, not a full sentence.
+
+For example:
+
+- Artifact
+- User management and authentication
+
+Begin by giving a one to two sentence description of the topic, then dive in deeper as needed.
+
+For a deeper dive into topics, refer to the [diátaxis explanations page](https://diataxis.fr/explanation/).
+
+### Reference
+
+Reference docs serve a single purpose: to provide quick, clear information when a user needs it. The intention is not that users *read* the reference, but instead that they *consult* it as needed when working with Infrahub.
+
+**Naming guidelines:** Mirror the code-level naming guidelines where possible. This makes it easier to connect docs to code quickly.
+
+For a deeper dive into reference docs, refer to the [diátaxis reference page](https://diataxis.fr/reference/).
+
+## Creating application screenshots
+
+In an effort to keep application screenshots up to date, use [Cypress](https://docs.cypress.io/guides/overview/why-cypress) to generate images whenever possible.
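+
+As a rough sketch, a screenshot step in a Cypress spec can look like the following. The route, test names, and file name here are made up for illustration and are not taken from the Infrahub test suite:
+
+```ts
+// Hypothetical spec: visit a page and capture an image for the docs.
+describe("tutorial screenshots", () => {
+  it("captures the branch list page", () => {
+    cy.visit("/branches"); // illustrative route
+    cy.screenshot("tutorial/branch-list"); // saved under cypress/screenshots by default
+  });
+});
+```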
+Refer to the [E2E tests section](/development/#e2e-tests) of the development docs for details, and reference `frontend/tests/e2e/tutorial` for examples from the [getting started](/tutorials/getting-started/) tutorial.
+
+## Style guide
+
+As a general rule, prefer consistency and simplicity when possible. This guide should help in making choices, but it is a living document and should evolve as the needs of the project evolve. For anything not answered below, reference the [Microsoft Style Guide](https://learn.microsoft.com/en-us/style-guide/welcome/).
+
+General tips:
+
+- Avoid words like *easy*, *just*, or *simple* to describe how to do something or how "easy" a task is.
+- If a sentence looks too long, it probably is. Try to simplify it or break it into multiple sentences.
+- Avoid jargon unless you are sure the reader knows the term.
+- Don't hesitate to link between pages and concepts.
+- Avoid repeating information when possible, and instead link out to topic or reference pages.
+
+### Language
+
+We use American English for most standard text. Unique technical terms are [included below](#common-external-words), or in the [Microsoft A-Z word list](https://learn.microsoft.com/en-us/style-guide/welcome/).
+
+### Trailing commas
+
+Use a trailing comma when listing multiple items. This is commonly known as the Oxford comma or serial comma.
+
+**:x: Don't do this:** There are devices, organizations and users.
+
+**:white_check_mark: Do this:** There are devices, organizations, and users.
+
+### Headings and titles
+
+Headings and titles should capitalize the first word only and end with no punctuation. The exception is proper nouns.
+
+**:x: Don't do this**: Getting Started!
+
+**:white_check_mark: Do this**: Getting started
+
+Every page should have a top-level heading. Additional heading tiers can only exist if a higher tier has been used.
+
+**:x: Don't do this**:
+
+```md
+# Page title
+
+### Smaller heading
+```
+
+### Avoid over-capitalization
+
+It is tempting to want to capitalize all feature names. Unless the term is a named marketing feature, avoid capitalization.
+
+**:x: Don't do this**: Git Repository, API Server, User Management
+
+**:white_check_mark: Do this**: Git repository, API server, user management
+
+### Lists
+
+Capitalize the first letter of each list item. If an item is a complete sentence, give it a period at the end. If it's not, it is okay to omit punctuation. The [Microsoft Style Guide](https://learn.microsoft.com/en-us/style-guide/scannable-content/lists) has a good explanation of how to handle list punctuation.
+
+When listing items and descriptions, prefer the use of a colon (:) instead of a dash (-).
+
+```md
+<!-- don't do this -->
+- Not - this
+- Or - this
+
+<!-- do this -->
+- Do: this
+- And: this
+```
+
+### Colons
+
+Avoid extra spaces before a colon.
+
+```md
+<!-- don't do this -->
+Feature : Explanation of feature
+
+<!-- do this -->
+Feature: Explanation of feature
+```
+
+### Code blocks
+
+When creating a code block or snippet with three backticks, make sure to include a language designation.
+
+~~~md
+```sh
+this is a shell script
+```
+~~~
+
+### Marking code items
+
+Sometimes you need to mention a `function` or `ModelName`. To do so, use the inline code backticks in markdown.
+
+### Use i.e. for examples
+
+Prefer `i.e.` over `e.g.` or `ex.`. In a sentence, `i.e.` is surrounded by commas.
+
+For example: *Select the current branch, i.e., 'main'.*
+
+It's also acceptable and clearer to use "for example" or "such as".
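+
+Following the pattern of the sections above, a quick contrast may help (this pair is illustrative, not from the original guide):
+
+```md
+<!-- don't do this -->
+Select the current branch, ex. 'main'.
+
+<!-- do this -->
+Select the current branch, i.e., 'main'.
+```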
+ +### Common external words + +Refer to external brand guidelines for capitalization rules. Here are some common spellings and uses for brands found in the Infrahub docs. Additional terms can be found in `.vale/styles/Infrahub/branded-terms-case-swap.yml`. + +- GitHub +- GitLab +- Git +- RabbitMQ +- GraphQL +- MacOS +- Linux + +## Documentation release checklist + +Before publishing new changes to documentation, complete the following tasks: + +- [ ] Generate output files for automated pages with `invoke docs.generate`. + - [ ] Confirm build of `infrahubctl` pages. + - [ ] Confirm build of `infrahub-cli` pages. + - [ ] Confirm build of schema pages. +- [ ] [Generate application screenshots](#creating-application-screenshots). +- [ ] If there is a new app version, create a new release notes document in `docs/release-notes`. +- [ ] Run [linters](#linting-and-automation) and fix valid errors on all source files. +- [ ] Perform test build of docs, `invoke docs.build`. \ No newline at end of file diff --git a/docs/development/readme.md b/docs/development/readme.md index 250ba16e8b..9b41e1f96f 100644 --- a/docs/development/readme.md +++ b/docs/development/readme.md @@ -1,9 +1,8 @@ - -# Developer Guide +# Developer guide ## Backend / Python -### Code Linting +### Code linting - **yamllint** - **Black** @@ -15,12 +14,12 @@ `invoke tests` will run all the linter at once to quickly validate if all files have the right format and are compliant with the internal coding guidelines. To help format the code correctly, the project is also recommending: + - **autoflake** to automatically remove all unused variables and all unused import - **isort** to automatically sort all imports > `invoke format` will run Black, autoflake and isort together to ensure all files are as close as possible to the expected format. - ### Run tests ```shell @@ -31,38 +30,43 @@ infrahub test integration tests/integration/user_workflows/ infrahub test client infrahub test ctl ``` + or + ```shell infrahub test unit ``` +## Frontend -## FrontEnd +### Code linting -### Code Linting - **prettier** - **eslint** ### Run tests #### Unit tests -``` + +```sh npm run test ``` or with coverage: -``` + +```sh npm run test:coverage ``` #### Integration tests -``` + +```sh npm run cypress:run:component ``` #### E2E tests -``` +```sh npm run cypress:run:e2e ``` @@ -78,20 +82,19 @@ npm run cypress:run:e2e ![](../media/tests/cypress_e2e_3.png) Now we can: + - verify if the content is correct: ```cy.get("...").should("have.text", "Account");``` - - verify the same thing but for inputs: ```cy.get("...").should("have.value", "some value");``` - - and also trigger a click: ```cy.get("...").click()``` Full documentation: https://docs.cypress.io/guides/end-to-end-testing/writing-your-first-end-to-end-test -## Developer Environment +## Developer environment -### VS Code Extensions +### VS Code extensions - Excalidraw: https://marketplace.visualstudio.com/items?itemName=pomdtr.excalidraw-editor - Jinja: https://marketplace.visualstudio.com/items?itemName=wholroyd.jinja diff --git a/docs/getting-started/readme.md b/docs/getting-started/readme.md deleted file mode 100644 index b6c9dea91b..0000000000 --- a/docs/getting-started/readme.md +++ /dev/null @@ -1,39 +0,0 @@ - -# Build a local Environment - -The project includes a local demo environment that can be used to explore or demo Infrahub. 
-
-The main requirements to run the Sandbox environment are:
-- [Invoke](https://www.pyinvoke.org) (version 2 minimum)
-- [Docker & Docker Compose](https://docs.docker.com/engine/install/)
-
-## Prepare the Local Environment
-
-```
-invoke demo.build
-```
-
-You can then start all the services with
-
-```
-invoke demo.start
-```
-
-[!ref Access the Web interface](http://localhost:8000)
-[!ref Access the GraphQL interface](http://localhost:8000/graphql)
-
-
-# Explore Infrahub with some Infrastructure Data
-
-A demo dataset representing a simple 6 nodes network is available to explore Infrahub with some meaningful data.
-You can load the demo data and its associated schema with the following commands.
-
-```
-invoke demo.load-infra-schema
-invoke demo.load-infra-data
-```
-
-To explore further, [a tutorial](../tutorial/readme.md) is available to guide you through the application and the GraphQL interface.
-
-
-[!ref Check the documentation of the demo environment for more information](../knowledge-base/local-demo-environment.md)
diff --git a/docs/guides/index.yml b/docs/guides/index.yml
new file mode 100644
index 0000000000..86c011af89
--- /dev/null
+++ b/docs/guides/index.yml
@@ -0,0 +1,4 @@
+---
+label: Guides
+icon: stack
+order: 900
diff --git a/docs/guides/installation.md b/docs/guides/installation.md
new file mode 100644
index 0000000000..b4ee09986a
--- /dev/null
+++ b/docs/guides/installation.md
@@ -0,0 +1,100 @@
+---
+icon: terminal
+---
+# Installing Infrahub
+
+Infrahub is composed of multiple components. The backend is mostly written in Python and the frontend in React.
+
+The main components are:
+
+- A **Frontend** written in React.
+- An **API server** written in Python with FastAPI.
+- A **Git agent** to manage the interaction with external Git repositories.
+- A **Graph database** based on `Neo4j` 5.x or `memgraph`.
+- A **Message bus** based on `RabbitMQ`.
+
+## Docker Compose
+
+The recommended way to run Infrahub is to use the Docker Compose setup included with the project, combined with the helper commands defined in `invoke`.
+
+The prerequisites for this type of deployment are to have:
+
+- [Invoke](https://www.pyinvoke.org) (version 2 minimum) and TOML
+- [Docker](https://docs.docker.com/engine/install/) (version 24.x minimum)
+
++++ MacOS
+
+### Invoke
+
+On MacOS, Python is installed by default, so you should be able to install `invoke` directly.
+Invoke works best when you install it in the main Python environment, but you can also install it in a virtual environment if you prefer. To install `invoke` and `toml`, run the following command:
+
+```sh
+pip install invoke toml
+```
+
+### Docker
+
+To install Docker, follow the [official instructions on the Docker website](https://docs.docker.com/desktop/install/mac-install/) for your platform.
+
++++ Windows
+
+On Windows, install a Linux VM via WSL2 and follow the installation guide for Ubuntu.
+
+!!!
+The native support on Windows is currently under investigation and is being tracked in [issue 794](https://github.com/opsmill/infrahub/issues/794).
+Please add a comment to the issue if this is something that would be useful to you.
+!!!
+
++++ Ubuntu
+
+!!!warning
+On Ubuntu, depending on which release you're running, there is a good chance your version of Docker might be outdated. Please ensure your installation meets the version requirements mentioned below.
+!!!
+
+### Invoke
+
+Invoke is a Python package commonly installed by running `pip install invoke toml`.
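+
+For copy-paste convenience, this is the same command shown in the MacOS section:
+
+```sh
+pip install invoke toml
+```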
+
+If Python is not already installed on your system, install it first with `sudo apt install python3-pip`.
+
+### Docker
+
+Check whether Docker is installed, and which version, with `docker --version`.
+The version should be at least `24.x`. If the version is `20.x`, it's recommended to upgrade.
+
+[This tutorial (for Ubuntu 22.04) explains how to install the latest version of Docker on Ubuntu](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-22-04).
+
++++ Other
+
+The deployment should work on any system that can run a modern version of Docker and Python.
+
+Please reach out if you need some help and feel free to send a PR with the installation instructions for your platform.
+
++++
+
+Once Docker and Invoke are installed, you can build, start, and initialize the Infrahub demo environment with the following command:
+
+```sh
+invoke demo.build demo.start demo.load-infra-schema demo.load-infra-data
+```
+
+[!ref Check the documentation of the demo environment for more information](../topics/local-demo-environment.md)
+
+## GitPod
+
+The project is also pre-configured to run in GitPod.
+
+!!!
+GitPod provides a Cloud Development Environment that allows you to run any project right within your browser.
+!!!
+
+GitPod has a generous free tier of 50 hours per month.
+> For our early testers, we also provide access to our GitPod organization, which includes some credits and some pre-built environments to speed up deployment.
+
+[!ref Check Infrahub in GitPod](https://gitpod.io/#/github.com/opsmill/infrahub)
+
+## K8s with Helm charts
+
+Support for K8s is not yet available, but we are actively tracking this effort in our short/mid-term roadmap. You can follow [this issue for updates](https://github.com/opsmill/infrahub/issues/506).
+
+Please reach out and let us know if you are interested in this feature. It helps us prioritize what the team needs to focus on.
diff --git a/docs/guides/python-transform.md b/docs/guides/python-transform.md
new file mode 100644
index 0000000000..3f6ed242cd
--- /dev/null
+++ b/docs/guides/python-transform.md
@@ -0,0 +1,313 @@
+---
+icon: file-code
+label: Creating a Python transform
+---
+# Creating a Python transform
+
+Within Infrahub a [Transform](/topics/transformation) is defined in an [external repository](/topics/repository). However, during development and troubleshooting it is easiest to start from your local computer and run the transform using [infrahubctl transform](/infrahubctl/infrahubctl-transform).
+
+The goal of this guide is to develop a Python Transform and add it to Infrahub. We will achieve this by following these steps:
+
+1. Identify the relevant data you want to extract from the database using a [GraphQL query](/topics/graphql) that can take an input parameter to filter the data
+2. Write a Python script that uses the GraphQL query to read information from the system and transforms the data into a new format
+3. Create an entry for the transform within a .infrahub.yml file.
+4. Create a Git repository
+5. Test the transform with infrahubctl
+6. Add the repository to Infrahub as an external repository
+7. Validate that the transform works by using the transform API endpoint
+
+In this guide we are going to work with the builtin tag objects in Infrahub. The transform itself won't be very useful; the goal is instead to show how transforms are created. Once you have mastered the basics you will be ready to go on to create more advanced transforms.
+
+## 1. 
Creating a query to collect the desired data
+
+As the first step we need to have some data in the database to actually query.
+
+Create three tags, called "red", "green", "blue", either using the frontend or by submitting three GraphQL mutations as shown below (swapping out the name of the color each time).
+
+```GraphQL
+mutation CreateTags {
+  BuiltinTagCreate(
+    data: {name: {value: "red"}, description: {value: "The red tag"}}
+  ) {
+    ok
+    object {
+      id
+    }
+  }
+}
+```
+
+The next step is to create a query that returns the data we just created. The rest of this guide assumes that the following query will return a response similar to the one shown below it.
+
+```GraphQL
+query TagsQuery {
+  BuiltinTag {
+    edges {
+      node {
+        name {
+          value
+        }
+        description {
+          value
+        }
+      }
+    }
+  }
+}
+```
+
+Response to the tags query:
+
+```json
+{
+  "data": {
+    "BuiltinTag": {
+      "edges": [
+        {
+          "node": {
+            "name": {
+              "value": "blue"
+            },
+            "description": {
+              "value": "The blue tag"
+            }
+          }
+        },
+        {
+          "node": {
+            "name": {
+              "value": "green"
+            },
+            "description": {
+              "value": "The green tag"
+            }
+          }
+        },
+        {
+          "node": {
+            "name": {
+              "value": "red"
+            },
+            "description": {
+              "value": "The red tag"
+            }
+          }
+        }
+      ]
+    }
+  }
+}
+```
+
+While it would be possible to create a transform that targets all of these tags, for example if you want to create a report, the goal for us is to be able to focus on one of these objects. For this reason we need to modify the query from above to take an input parameter, so that we can filter the result to what we want.
+
+Create a local directory on your computer.
+
+```sh
+mkdir tags_transform
+```
+
+Then save the below query as a text file named tags_query.gql.
+
+```GraphQL
+query TagsQuery($tag: String!) {
+  BuiltinTag(name__value: $tag) {
+    edges {
+      node {
+        name {
+          value
+        }
+        description {
+          value
+        }
+      }
+    }
+  }
+}
+```
+
+Here the query requires an input parameter called `$tag`, which refers to the name of each tag. When we want to query for the red tag, the input variables to the query would look like this:
+
+```json
+{
+  "tag": "red"
+}
+```
+
+## 2. Create the Python transform file
+
+The next step is to create the actual Python transform. The transform is a Python class that inherits from InfrahubTransform from the [Python SDK](/python-sdk). Create a file called tags_transform.py.
+
+```python
+from infrahub.transforms import InfrahubTransform
+
+
+class TagsTransform(InfrahubTransform):
+
+    query = "tags_query"
+    url = "my-tags"
+
+    async def transform(self, data):
+        tag = data["BuiltinTag"]["edges"][0]["node"]
+        tag_name = tag["name"]["value"]
+        tag_description = tag["description"]["value"]
+
+        return {
+            "tag_title": tag_name.title(),
+            "bold_description": f"*{tag_description}*".upper()
+        }
+```
+
+The example is simplistic in terms of what we do with the data, but all of the important parts of a transform exist here.
+
+1. We import the InfrahubTransform class
+
+```python
+from infrahub.transforms import InfrahubTransform
+```
+
+2. We define our own class based on InfrahubTransform.
+
+```python
+class TagsTransform(InfrahubTransform):
+```
+
+Here we need to note the name of the class, as we will need it later. Optionally, we can call it `Transform`, which is the default name.
+
+3. We define where data comes from and what API endpoint to use
+
+```python
+    query = "tags_query"
+    url = "my-tags"
+```
+
+The query part refers to the file tags_query.gql that we created earlier. 
+The `url` parameter controls what the endpoint will be when you run this transform by targeting the API server.
+
+With this configuration the endpoint of our transform will be [http://localhost:8000/api/transform/my-tags](http://localhost:8000/api/transform/my-tags).
+
+4. The transform method
+
+```python
+    async def transform(self, data):
+        tag = data["BuiltinTag"]["edges"][0]["node"]
+        tag_name = tag["name"]["value"]
+        tag_description = tag["description"]["value"]
+
+        return {
+            "tag_title": tag_name.title(),
+            "bold_description": f"*{tag_description}*".upper()
+        }
+```
+
+When running the transform, the `data` input variable will consist of the response to the query we created. In this case we return a JSON object consisting of two keys, `tag_title` and `bold_description`, where we have modified the data in some way. Here you would return data in the format you need.
+
+!!!info
+If you are unsure of the format of the data, you can set a breakpoint when testing the transform with infrahubctl:
+
+```python
+    async def transform(self, data):
+        breakpoint()
+        tag = data["BuiltinTag"]["edges"][0]["node"]
+        tag_name = tag["name"]["value"]
+        tag_description = tag["description"]["value"]
+```
+
+!!!
+
+## 3. Create a .infrahub.yml file
+
+In the `.infrahub.yml` file you define which transforms in your repository you want to make available to Infrahub.
+
+Create a `.infrahub.yml` file in the root of the directory.
+
+```yaml
+---
+python_transforms:
+  - name: tags_transform
+    class_name: TagsTransform
+    file_path: "tags_transform.py"
+```
+
+Two parts here are required: the `name` of the transform, which should be unique across Infrahub, and the `file_path`, which should point to the Python file within the repository. In this example we have also defined `class_name`, because we gave our class the name "TagsTransform" instead of the default "Transform".
+
+## 4. Create a Git repository
+
+Within the `tags_transform` folder you should now have three files:
+
+* tags_query.gql: Contains the GraphQL query
+* tags_transform.py: Contains the Python code for the transform
+* .infrahub.yml: Contains the definition for the transform
+
+Before we can test our transform we must add the files to a local Git repository.
+
+```sh
+git init --initial-branch=main
+git add .
+git commit -m "First commit"
+```
+
+## 5. Test the transform using infrahubctl
+
+Using infrahubctl you can first verify that the `.infrahub.yml` file is formatted correctly by listing available transforms.
+
+```sh
+❯ infrahubctl transform --list
+
+Python transforms defined in repository: 1
+tags_transform (tags_transform.py::TagsTransform)
+```
+
+!!!info
+Trying to run the transform with just the name will produce an error.
+
+```sh
+❯ infrahubctl transform tags_transform
+
+1 error(s) occured while executing the query
+ - Message: Variable '$tag' of required type 'String!' was not provided.
+   Location: [{'line': 1, 'column': 17}]
+Aborted.
+
+```
+
+Here we can see that our query is missing the required input for `$tag`, which is needed to filter the data.
+!!!
+
+Run the transform and specify the variable name along with the tag we want to target.
+
+```sh
+❯ infrahubctl transform tags_transform tag=red
+
+{
+  "tag_title": "Red",
+  "bold_description": "*THE RED TAG*"
+}
+```
+
+We have now successfully created a transform. Most of the transforms you create will be more complex than this; however, the main building blocks will always remain the same. You might, for example, need the output in OpenConfig format, as Terraform input variables, or in any other kind of format.
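+As a purely hypothetical illustration of a slightly more advanced transform, the sketch below builds a small report across all tags instead of a single one. It assumes a second query file without the `$tag` parameter (for example the unfiltered `TagsQuery` from step 1, saved under an illustrative name such as `tags_report_query.gql`), reuses the same base class, and would need its own entry in `.infrahub.yml`:
+
+```python
+from infrahub.transforms import InfrahubTransform
+
+
+class TagsReportTransform(InfrahubTransform):
+
+    query = "tags_report_query"  # hypothetical query without the $tag filter
+    url = "tags-report"
+
+    async def transform(self, data):
+        # Build one report line per tag returned by the query
+        lines = [
+            f"- {edge['node']['name']['value']}: {edge['node']['description']['value']}"
+            for edge in data["BuiltinTag"]["edges"]
+        ]
+        return {"count": len(lines), "report": "\n".join(lines)}
+```
+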
+## 6. Adding the repository to Infrahub
+
+To avoid repeating the same instructions in multiple guides, please refer to [adding a repository to Infrahub](/guides/repository) for the steps needed to sync the repository you created and make it available within Infrahub.
+
+## 7. Accessing the transform from the API
+
+Once the repository is synced to Infrahub you can access the transform from the API:
+
+```sh
+❯ curl http://localhost:8000/api/transform/my-tags?tag=blue
+
+{
+  "tag_title": "Blue",
+  "bold_description": "*THE BLUE TAG*"
+}
+
+❯ curl http://localhost:8000/api/transform/my-tags?tag=red
+
+{
+  "tag_title": "Red",
+  "bold_description": "*THE RED TAG*"
+}
+
+```
diff --git a/docs/guides/readme.md b/docs/guides/readme.md
new file mode 100644
index 0000000000..d3a2cb4b58
--- /dev/null
+++ b/docs/guides/readme.md
@@ -0,0 +1,5 @@
+# Guides
+
+Guides explain the steps to complete tasks within Infrahub. If you haven't already, start with the [getting started](/tutorials/getting-started/) tutorial.
+
+- [Installing Infrahub](installation.md)
\ No newline at end of file
diff --git a/docs/guides/repository.md b/docs/guides/repository.md
new file mode 100644
index 0000000000..58ed4cb7e4
--- /dev/null
+++ b/docs/guides/repository.md
@@ -0,0 +1,9 @@
+---
+icon: repo-forked
+label: Adding a repository
+---
+# Adding a repository
+
+!!!warning
+Coming Soon
+!!!
diff --git a/docs/infrahubctl/infrahubctl-check.md b/docs/infrahubctl/infrahubctl-check.md
index cf0cf75a02..245f4e32d8 100644
--- a/docs/infrahubctl/infrahubctl-check.md
+++ b/docs/infrahubctl/infrahubctl-check.md
@@ -35,7 +35,8 @@ $ infrahubctl check run [OPTIONS] [PATH]
**Options**:
* `--branch TEXT`
-* `--rebase / --no-rebase`: [default: rebase]
* `--debug / --no-debug`: [default: no-debug]
* `--format-json / --no-format-json`: [default: no-format-json]
+* `--config-file TEXT`: [env var: INFRAHUBCTL_CONFIG; default: infrahubctl.toml]
+* `--name TEXT`
* `--help`: Show this message and exit.
diff --git a/docs/infrahubctl/infrahubctl-render.md b/docs/infrahubctl/infrahubctl-render.md
new file mode 100644
index 0000000000..e5d56105c4
--- /dev/null
+++ b/docs/infrahubctl/infrahubctl-render.md
@@ -0,0 +1,23 @@
+# `infrahubctl render`
+
+Render a local Jinja Template (RFile) for debugging purposes.
+
+**Usage**:
+
+```console
+$ infrahubctl render [OPTIONS] RFILE_NAME [VARIABLES]...
+```
+
+**Arguments**:
+
+* `RFILE_NAME`: [required]
+* `[VARIABLES]...`: Variables to pass along with the query. Format key=value key=value.
+
+**Options**:
+
+* `--branch TEXT`: Branch on which to render the RFile.
+* `--debug / --no-debug`: [default: no-debug]
+* `--config-file TEXT`: [env var: INFRAHUBCTL_CONFIG; default: infrahubctl.toml]
+* `--install-completion`: Install completion for the current shell.
+* `--show-completion`: Show completion for the current shell, to copy it or customize the installation.
+* `--help`: Show this message and exit.
diff --git a/docs/infrahubctl/infrahubctl-run.md b/docs/infrahubctl/infrahubctl-run.md
index d64fb45b4f..266eda94d5 100644
--- a/docs/infrahubctl/infrahubctl-run.md
+++ b/docs/infrahubctl/infrahubctl-run.md
@@ -17,4 +17,9 @@ $ infrahubctl run [OPTIONS] SCRIPT
* `--method TEXT`: [default: run]
* `--debug / --no-debug`: [default: no-debug]
* `--config-file TEXT`: [env var: INFRAHUBCTL_CONFIG; default: infrahubctl.toml]
+* `--branch TEXT`: Branch on which to run the script. [default: main]
+* `--concurrent INTEGER`: Maximum number of requests to execute at the same time. [env var: INFRAHUBCTL_CONCURRENT_EXECUTION; default: 4]
+* `--timeout INTEGER`: Timeout in sec [env var: INFRAHUBCTL_TIMEOUT; default: 60]
+* `--install-completion`: Install completion for the current shell.
+* `--show-completion`: Show completion for the current shell, to copy it or customize the installation.
* `--help`: Show this message and exit.
diff --git a/docs/infrahubctl/infrahubctl-transform.md b/docs/infrahubctl/infrahubctl-transform.md
new file mode 100644
index 0000000000..621af67518
--- /dev/null
+++ b/docs/infrahubctl/infrahubctl-transform.md
@@ -0,0 +1,24 @@
+# `infrahubctl transform`
+
+Render a local transform (TransformPython) for debugging purposes.
+
+**Usage**:
+
+```console
+$ infrahubctl transform [OPTIONS] [TRANSFORM_NAME] [VARIABLES]...
+```
+
+**Arguments**:
+
+* `[TRANSFORM_NAME]`: [default: Name of the Python transformation class]
+* `[VARIABLES]...`: Variables to pass along with the query. Format key=value key=value.
+
+**Options**:
+
+* `--branch TEXT`: Branch on which to run the transformation
+* `--debug / --no-debug`: [default: no-debug]
+* `--config-file TEXT`: [env var: INFRAHUBCTL_CONFIG; default: infrahubctl.toml]
+* `--list`: Show available transforms
+* `--install-completion`: Install completion for the current shell.
+* `--show-completion`: Show completion for the current shell, to copy it or customize the installation.
+* `--help`: Show this message and exit.
diff --git a/docs/infrahubctl/readme.md b/docs/infrahubctl/readme.md
index 8d321a2a33..fdc25afdc4 100644
--- a/docs/infrahubctl/readme.md
+++ b/docs/infrahubctl/readme.md
@@ -3,27 +3,28 @@
`infrahubctl` is a command line utility designed to help with the day to day management of an Infrahub installation. It's meant to run on any laptop or server and it communicates with a remote Infrahub server over the network.
-`infrahubctl` can help you to
-- Manage the branches in Infrahub : List, Create, Merge, Rebase, Delete
-- Manage the schema and load new schema files into Infrahub
-- Execute any Python script that requires access to the Python SDK
-- Render a Jinja Template locally for troubleshooting
-- Execute a GraphQL query store in a Git repository for troubleshooting
-- Validate that input files conform with the format expected by Infrahub
+`infrahubctl` can help you to:
+
+- Manage the branches in Infrahub: List, Create, Merge, Rebase, Delete.
+- Manage the schema and load new schema files into Infrahub.
+- Execute any Python script that requires access to the Python SDK.
+- Render a Jinja Template locally for troubleshooting.
+- Execute a GraphQL query stored in a Git repository for troubleshooting.
+- Validate that input files conform with the format expected by Infrahub.
## Configuration
-`infrahubctl` requires a minimal set of configuration in order to connect to the right Infrahub server with the correct credentials. These settings can be provided either in a configuration file `infrahubctl.toml` or via environment variables.
+`infrahubctl` requires a minimal configuration in order to connect to the right Infrahub server with the correct credentials. These settings can be provided either in a configuration file, `infrahubctl.toml`, or via environment variables.
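+For example, to point `infrahubctl` at the local demo environment, you could export the variables documented below before running a command such as `infrahubctl branch list` (the token shown is the demo environment's default Admin API token; adjust all three values for your own deployment):
+
+```sh
+export INFRAHUB_ADDRESS="http://localhost:8000"
+export INFRAHUB_API_TOKEN="06438eb2-8019-4776-878c-0941b1f1d1ec"
+export INFRAHUB_DEFAULT_BRANCH="main"
+
+infrahubctl branch list
+```
+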
-### Environment Variables +### Environment variables -| Name | Example value | -| -- | -- | -| `INFRAHUB_ADDRESS` | http://localhost:8000 | -| `INFRAHUB_API_TOKEN` | `06438eb2-8019-4776-878c-0941b1f1d1ec` | -| `INFRAHUB_DEFAULT_BRANCH` | main | +| Name | Example value | +| ------------------------- | -------------------------------------- | +| `INFRAHUB_ADDRESS` | http://localhost:8000 | +| `INFRAHUB_API_TOKEN` | `06438eb2-8019-4776-878c-0941b1f1d1ec` | +| `INFRAHUB_DEFAULT_BRANCH` | main | -> the location of a configuration file can be also provided via environment variable : `INFRAHUBCTL_CONFIG` +> You can also provide the location of a configuration file via the environment variable `INFRAHUBCTL_CONFIG`. ### `infrahubctl.toml` file diff --git a/docs/installation/index.yml b/docs/installation/index.yml deleted file mode 100644 index 9ca60db525..0000000000 --- a/docs/installation/index.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -label: Installation -icon: "tools" -order: 850 diff --git a/docs/installation/readme.md b/docs/installation/readme.md deleted file mode 100644 index b64eb04c74..0000000000 --- a/docs/installation/readme.md +++ /dev/null @@ -1,100 +0,0 @@ - - - -# Introduction - -Infrahub is composed of multiple components, the backend is mostly written in Python and the frontend in React. - -The main components are: -- **A Frontend** written in react -- An **API Server** written in Python with FastAPI -- A **Git agent** to manage the interaction with external Git repositories -- A **Graph Database** based on `Neo4j` 5.x or `memgraph` -- A **Message Bus** based on `RabbitMQ` - -## Docker Compose - -Currently, the recommended way to run Infrahub is to use the docker-compose project included with the project combined with the helper commands defined in `invoke` - -The pre-requisites for this type of deployment are to have: -- [Invoke](https://www.pyinvoke.org) (version 2 minimum) and Toml -- [Docker](https://docs.docker.com/engine/install/) (version 24.x minimum) - - - -+++ Mac OS - -### Invoke - -On Mac OS Python is installed by default so you should be able to install `invoke` directly. -Invoke works best when you install it in the main Python but you can also install it in a virtual environment if you prefer. - -``` -pip install invoke toml -``` - -### Docker - -For Docker, you can download Docker Desktop directly from Docker's website with the instructions https://docs.docker.com/desktop/install/mac-install/ - -+++ Windows - -The current recommendation for Windows is to install a Linux VM via WSL2 and follow the installation guide for Ubuntu. - -!!! -The native support on Windows is currently under investigation and is being tracked in the [issue 794](https://github.com/opsmill/infrahub/issues/794). -Please add a comment to the issue if this is something that would be useful to you. -!!! - -+++ Ubuntu - -!!!warning -On Ubuntu, depending on which distribution you're running there is a good chance your version of Docker might be outdated. -!!! - -### Invoke - -Invoke is a Python package that is usually installed with `pip install invoke toml`. -If Python is not already installed on your system you'll need to install it first with `sudo apt install python3-pip` - -### Docker - - -You can check if docker is installed and which version of docker is installed with `docker --version` -The version should be at least `24.x`. if the version is `20.x` it's recommended to upgrade. 
- -[This tutorial (for Ubuntu 22.04) explains how to install the latest version of docker on Ubuntu](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-22-04) - -+++ Other - -The deployment should work on any systems that can run a modern version of Docker and Python. - -Please reach out if you need some help and feel free to send a PR with the installation instructions for your platform. - -+++ - -Once docker desktop and invoke are properly installed you can build start Infrahub with the following command -``` -invoke demo.build demo.start demo.load-infra-schema demo.load-infra-data -``` - -[!ref Check the documentation of the demo environment for more information](../knowledge-base/local-demo-environment.md) - -## GitPod - -The project is also pre-configured to run in GitPod - -!!! -GitPod provides a Cloud Development Environment that makes it very easy to run any project right within your browser. -!!! - -GitPod has a generous free tier of 50/hours per month for free. -> For our early testers, we also provide access to our GitPod organization which includes some credits and some pre-built environments to speedup the deployment time. - -[!ref Check Infrahub in GitPod](https://gitpod.io/#/github.com/opsmill/infrahub) - -## K8s with Helm Chart - -The support for K8s is not yet available but we are actively tracking this effort in our short/mid-term roadmap -https://github.com/opsmill/infrahub/issues/506 -Please reach out and let us know you are interested in this feature, it's always helpful to prioritize what the team needs to focus on. diff --git a/docs/knowledge-base/architecture.md b/docs/knowledge-base/architecture.md deleted file mode 100644 index bf566dae03..0000000000 --- a/docs/knowledge-base/architecture.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -label: Architecture -layout: default -order: 1000 ---- -# Architecture Diagram - -![](../media/high_level_architecture.excalidraw.svg) - -## Infrahub Components - -### API Server - -Language: Python - -The API Server is serving the REST API and the GraphQL endpoints. -Internally the API Server is built with FastAPI as the web framework and Graphene to generate the GraphQL endpoints. - -!!! -Multiple instance of the API Server can run at the same time to process more requests. -!!! - -### Git Agent - -Language: Python - -The Git agent is responsible for managing all the content related to the Git repositories, it organizes the file systems in order to quickkly access any relevant commit. The Git Agent is periodically pulling the Git Server for updates and it's listening to the RPC channel on the event bus for tasks to execute. -Some of the tasks that can be executed on the Git agent includes: -- Rendering a Jinja template -- Rendering a transform function -- Executing a check -- All Git operations (pull/merge/diff) - -!!! -Multiple instance of the Git Agent can run at the same time to process more requests. -!!! - -### Frontend - -Language: React - -## External Systems - -### Graph Database - -The Graph Database is based on Bolt and Cyper. Currently we have validated both Neo4j 5.x and Memgraph as possible options. -Neo4j is a production grade, battle tested graph database that is used in 1000s of deployments around the world. -Memgraph is a lightweight, very fast, in-memory database that works great for testing and demo. 
- -### Message Bus - -The message bus is based on RabbitMQ, it supports both a fanout channel to distribute messages to all members at the same time and a RPC framework to distribute work Syncronously. - -### Cache - -The cache is based on Redis, it's mainly used as a central point to support the distributed lock systems between all the different component of the system - -### Git Server (Github/Gitlab) - -Any Git server. The most popular being : GitHub, GitLab or Bitbucket \ No newline at end of file diff --git a/docs/knowledge-base/artifact.md b/docs/knowledge-base/artifact.md deleted file mode 100644 index 7b07411e00..0000000000 --- a/docs/knowledge-base/artifact.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -label: Artifact -layout: default -order: 800 ---- - -# Artifact - -An artifact is the result of a [Transformation](./transformation.md) for a specific context and/or object, it can have different format either in plain text or JSON. - -!!!success Examples -- For a network device, an artifact can be used to track the configuration generated from a Jinja template (RFile) -- For a Security Device, an artifact can be the list of rules in JSON in the format of your choice generated by a Python Transformation -An artifact can also represent the configuration of a DNS server or the configuration of a specific Virtual IP on a load balancer. -!!! - -While it's always possible to generate [Transformations](./transformation.md) on demand via the API, having an Artifact provide some additional benefits: -- **Caching** : Generated Artifact are stored in the internal [object storage](./object-storage.md). For resource intensive Transformation, it will significantly reduce the load of the system if an artifact can be serve from the cache instead of regenerating each time. -- **Traceability** : Past values of an artifact remains available. In a future release, it will be possible to compare the value of an artifact over time. -- **Peer Review** : Artifact are automatically part of the Proposed Change review process - -While the content of an artifact can change, it's identifier will remain the same over time. - -## High level design - -Artifacts are defined by grouping a [Transformation](./transformation.md) with a Group of targets in an Artifact Definition. - -An Artifact Definition centralize all the information required to generate an artifact -- Group of targets -- Transformation -- Format of the output -- Information to extract from each target that must be passed to the Transformation. - -![](../media/artifact.excalidraw.svg) - -## Artifact Definition - -Artifact Definition can be created via the Frontend, via GraphQL or via a Git Repository - -For Infrahub to automatically import an ArtifactDefinition from a Repository, it must be declare in the `.infrahub.yml` file at the root of the repository under the key `artifact_definitions`. - -```yaml ---- -artifact_definitions: - - name: "" - transformation: "" -``` - -## Artifact - -Artifact can be accessed via the frontend and via GraphQL but they shouldn't be manually created, all artifacts should be generated and managed by Infrahub. - -## Examples - -### Startup Configuration for Edge devices - -The project [infrahub-demo-edge](https://github.com/opsmill/infrahub-demo-edge) includes most elements to generate the startup configuration of all Edge Devices. 
- -in the `.infrahub.yml` the actifact definition is configured as follow: - -```yaml -artifact_definitions: - - name: "Startup Config for Edge devices" - artifact_name: "startup-config" - parameters: - device: "name__value" - content_type: "text/plain" - targets: "edge_router" - transformation: "device_startup" -``` - -- `transformation: "device_startup"` reference the Transformation RFile also define in the same repository. - - The GraphQLQuery `device_startup_info` is indirectly connected to the Artifact Definition via the Transformation. -- `targets: "edge_router"` reference a group of Edge routers named `edge_router`, it must be already present in Infrahub -- `parameters` define the information that must be extracted from each member of the group and that must be passed to the Transformation. Here the Transformation `device_startup` must have a parameter `device` (coming from the GraphQL Query) to render the configuration properly. The value of `device` for each member of the group will be constructed by accessing the value of the name `name__value` - - - - - diff --git a/docs/knowledge-base/auth.md b/docs/knowledge-base/auth.md deleted file mode 100644 index 9e55d241a3..0000000000 --- a/docs/knowledge-base/auth.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -label: User Management and Authentication -layout: default -order: 900 ---- - -### User Management and Authentication - -Infrahub now supports standard user management and authentication systems. - -A user account can have 3 levels of permissions -- `admin` -- `read-write` -- `read-only` - -By default, Infrahub will allow anonymous access in read-only. it's possible to disable this feature via the configuration `main.allow_anonymous_access` or via the environment variable `INFRAHUB_ALLOW_ANONYMOUS_ACCESS` - - -#### Authentication mechanisms - -Infrahub supports two authentication methods -- JWT token: Short live token that are generated on demand from the API -- API Token: Long live token generated ahead of time. - -> API token can be generated via the user profile page or via the Graphql interface. - -| | JWT | TOKEN | -|--------------------|------|-------| -| API / GraphQL | Yes | Yes | -| Frontend | Yes | No | -| Python SDK | Soon | Yes | -| infrahubctl | Soon | Yes | -| GraphQL Playground | No | Yes | - -While using the API the Authentication Token must be provided in a header named `X-INFRAHUB-KEY` - diff --git a/docs/knowledge-base/index.yml b/docs/knowledge-base/index.yml deleted file mode 100644 index 2eda841e41..0000000000 --- a/docs/knowledge-base/index.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -label: Knowledge Base -icon: "book" -order: 700 diff --git a/docs/knowledge-base/local-demo-environment.md b/docs/knowledge-base/local-demo-environment.md deleted file mode 100644 index ab755920f5..0000000000 --- a/docs/knowledge-base/local-demo-environment.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -label: Demo Environment -layout: default -order: 100 ---- -# Local Demo Environment - -A local environment based on Docker Composed is available for demo and testing. -It's designed to be controlled by `invoke` using a list of predefined commands - -| Command | Description | { class="compact" } -| --------------------- | ------------------------------------------------------------ | -| `demo.build` | Build an image with the provided name and python version. | -| `demo.init` | (deprecated) Initialize Infrahub database before using it the first time. | -| `demo.start` | Start a local instance of Infrahub within docker compose. 
| -| `demo.stop` | Stop the running instance of Infrahub. | -| `demo.destroy` | Destroy all containers and volumes. | -| `demo.cli-git` | Launch a bash shell inside the running Infrahub container. | -| `demo.cli-server` | Launch a bash shell inside the running Infrahub container. | -| `demo.debug` | Start a local instance of Infrahub in debug mode. | -| `demo.status` | Display the status of all containers. | -| `demo.load-infra-schema` | Load the infrastructure_base schema into Infrahub. | -| `demo.load-infra-data` | Generate some data representing a small networks with 6 devices. | - -## Topology - -| Container Name | Image | Description | { class="compact" } -| --------------- | ------------------------ | ------------------------------------------------------ | -| **database** | memgraph/memgraph:2.11.0
or
neo4j:5.6-enterprise | Graph Database | -| **message-queue** | rabbitmq:3.12-management | Message bus based on RabbitMQ | -| **cache** | redis:7.2 | Cache based on Redis, mainly used for distributed lock | -| **infrahub-server** | Dockerfile | Instance of the API Server, running GraphQL | -| **infrahub-git** | Dockerfile | Instance of the Git Agent, managing the Git Repository | -| **frontend** | Dockerfile | Instance of the Frontend | - -[!ref Check the architecture diagram to have more information about each component](./architecture.md) - -## Getting Started - -### Pre-Requisite - -In order to run the demo environment, the following applications must be installed on the systems: -- [pyinvoke](https://www.pyinvoke.org/) -- Docker & Docker Compose - -> On a Laptop, both Docker & Docker Compose can be installed by installing [Docker Desktop](https://www.docker.com/products/docker-desktop/) - -### First utilization - -Before the first utilization you need to build the images for Infrahub with the command: -``` -invoke demo.build -``` -Initialize the database and start the application -``` -invoke demo.start -``` - -### Load some data - -Once you have an environment up and running you can load your own schema or you can explore the one provided with the project using the following commands. -``` -invoke demo.load-infra-schema -invoke demo.load-infra-data -``` - -### Control the local environment - -- `invoke demo.start` : Start all the containers in detached mode. -- `invoke demo.stop` : Stop All the containers -- `invoke demo.destroy` : Destroy all containers and volumes. - - -!!! -`invoke demo.debug` can be used as an alternative to `invoke demo.start`, the main difference is that it will stay *attached* to the containers and all the logs will be displayed in real time in the CLI. -!!! - -## Advanced Settings - -### Support for `sudo` - -On a linux system, the system will try to automatically detect if `sudo` is required to run the docker command or not. - -It's possible to control this setting with the environment variable: `INVOKE_SUDO` - -``` -export INVOKE_SUDO=1 to force sudo -export INVOKE_SUDO=0 to disable it completely -``` - -### Support for `pty` - -On Linux and Mac OS, all commands will be executed with PTY enabled by default. - -It's possible to control this setting with the environment variable: `INVOKE_PTY` - -``` -export INVOKE_PTY=1 to force pty -export INVOKE_PTY=0 to disable it completely -``` - -## Troubleshooting - -At First, it's recommended to check if all containers are still running using `invoke demo.status`. The 5 containers should be running and be present. -- If one is not running, you can try to restart it with `invoke demo.start` -- If the container is still not coming up, you can watch the logs with `docker logs ` (the container name will include the name of the project and a number like `infrahub-dev-infrahub-git-1` ) - -If some containers are still not coming up, it's recommanded to start from a fresh install with `invoke demo.destroy`. \ No newline at end of file diff --git a/docs/knowledge-base/object-storage.md b/docs/knowledge-base/object-storage.md deleted file mode 100644 index fc9dd43b62..0000000000 --- a/docs/knowledge-base/object-storage.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -label: Object Storage -layout: default -order: 500 ---- - -Infrahub provides an interface to easily store and retrieve files in an object storage. The object storage interface is independent of the branches. 
- -Currently only a local backend is supported but the goal over time is to support multiple backend like AWS S3 to allow users to select where they would like their files to be stored. - -Currently the main interface to interact with the object storage is the REST API, 3 methods are supported - -- GET /api/storage/object/{identifier} -- POST /api/storage/upload/content -- POST /api/storage/upload/file - -Please check the API documentation for more details diff --git a/docs/knowledge-base/readme.md b/docs/knowledge-base/readme.md deleted file mode 100644 index 348748828d..0000000000 --- a/docs/knowledge-base/readme.md +++ /dev/null @@ -1,9 +0,0 @@ -# Knowledge Base - -- [Architecture](./architecture.md) -- [Transformation](./transformation.md) -- [Authentication](./auth.md) -- [Artifact](./artifact.md) -- [Object Storage](./object-storage.md) -- [Demo Environment](./local-demo-environment.md) - diff --git a/docs/media/artifacts.cy.ts/artifacts-2-artifact-details.png b/docs/media/artifacts.cy.ts/artifacts-2-artifact-details.png index 22fc4d043d..145f2a5238 100644 Binary files a/docs/media/artifacts.cy.ts/artifacts-2-artifact-details.png and b/docs/media/artifacts.cy.ts/artifacts-2-artifact-details.png differ diff --git a/docs/media/proposed-changes.cy.ts/proposed-changes-1-create.png b/docs/media/proposed-changes.cy.ts/proposed-changes-1-create.png index f4b815f0a8..c9131b6d13 100644 Binary files a/docs/media/proposed-changes.cy.ts/proposed-changes-1-create.png and b/docs/media/proposed-changes.cy.ts/proposed-changes-1-create.png differ diff --git a/docs/media/proposed-changes.cy.ts/proposed-changes-2-details.png b/docs/media/proposed-changes.cy.ts/proposed-changes-2-details.png index 97e1222c73..1d1af40b3c 100644 Binary files a/docs/media/proposed-changes.cy.ts/proposed-changes-2-details.png and b/docs/media/proposed-changes.cy.ts/proposed-changes-2-details.png differ diff --git a/docs/media/proposed-changes.cy.ts/proposed-changes-3-comments.png b/docs/media/proposed-changes.cy.ts/proposed-changes-3-comments.png new file mode 100644 index 0000000000..d278cd8568 Binary files /dev/null and b/docs/media/proposed-changes.cy.ts/proposed-changes-3-comments.png differ diff --git a/docs/media/proposed-changes.cy.ts/proposed-changes-5-comments-resolved.png b/docs/media/proposed-changes.cy.ts/proposed-changes-5-comments-resolved.png new file mode 100644 index 0000000000..1f648836ae Binary files /dev/null and b/docs/media/proposed-changes.cy.ts/proposed-changes-5-comments-resolved.png differ diff --git a/docs/media/proposed-changes.cy.ts/proposed-changes-6-data-diff.png b/docs/media/proposed-changes.cy.ts/proposed-changes-6-data-diff.png new file mode 100644 index 0000000000..89b86e28ed Binary files /dev/null and b/docs/media/proposed-changes.cy.ts/proposed-changes-6-data-diff.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_creation.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_creation.png index 74287156a4..484cbc712e 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_creation.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_creation.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_details.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_details.png index c498a6ecae..07c9d4350a 100644 Binary files 
a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_details.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_details.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_diff.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_diff.png index 65ed38493d..e484255515 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_diff.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_diff.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_list.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_list.png index 78fca422d3..62ecc555bf 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_list.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_branch_list.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_create.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_create.png index 28879bbf22..e4878c3a18 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_create.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_create.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_details.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_details.png index 867ee986a2..9ec0ee293c 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_details.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_details.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_edit.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_edit.png index c13d79c8a2..761af50b44 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_edit.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organization_edit.png differ diff --git a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organizations.png b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organizations.png index e56f56ec3e..fa2add9d50 100644 Binary files a/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organizations.png and b/docs/media/tutorial/tutorial-1-branch-and-version-control.cy.ts/tutorial_1_organizations.png differ diff --git a/docs/media/tutorial/tutorial-2-historical.cy.ts/tutorial_2_historical.png b/docs/media/tutorial/tutorial-2-historical.cy.ts/tutorial_2_historical.png index 78bc23f798..fa64b61f42 100644 Binary files a/docs/media/tutorial/tutorial-2-historical.cy.ts/tutorial_2_historical.png and b/docs/media/tutorial/tutorial-2-historical.cy.ts/tutorial_2_historical.png differ diff --git a/docs/media/tutorial/tutorial-3-schema.cy.ts/tutorial_3_schema.png b/docs/media/tutorial/tutorial-3-schema.cy.ts/tutorial_3_schema.png index 4f61c84951..51c490df98 100644 Binary files 
a/docs/media/tutorial/tutorial-3-schema.cy.ts/tutorial_3_schema.png and b/docs/media/tutorial/tutorial-3-schema.cy.ts/tutorial_3_schema.png differ diff --git a/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata.png b/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata.png index 3ac9d180b2..f3e19d7118 100644 Binary files a/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata.png and b/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata.png differ diff --git a/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata_edit.png b/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata_edit.png index bdf3d23b82..40db15d853 100644 Binary files a/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata_edit.png and b/docs/media/tutorial/tutorial-4-data.cy.ts/tutorial_4_metadata_edit.png differ diff --git a/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_branch_creation.png b/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_branch_creation.png index 96997b0ccd..e32cfb5649 100644 Binary files a/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_branch_creation.png and b/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_branch_creation.png differ diff --git a/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_interface_update.png b/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_interface_update.png index 10c896cbe6..47c63fa342 100644 Binary files a/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_interface_update.png and b/docs/media/tutorial/tutorial-6-git-integration.cy.ts/tutorial_6_interface_update.png differ diff --git a/docs/python-sdk/20_branches.md b/docs/python-sdk/branches.md similarity index 84% rename from docs/python-sdk/20_branches.md rename to docs/python-sdk/branches.md index 1a45dc1658..567a7409d4 100644 --- a/docs/python-sdk/20_branches.md +++ b/docs/python-sdk/branches.md @@ -1,28 +1,24 @@ --- -label: Branches Management +label: Branches management layout: default order: 500 --- # Branches management with the Python SDK -The Python SDK can be used for all standard operations on the branches : list, merge, rebase .. - +The Python SDK can be used for all standard operations on the branches: list, merge, rebase, and so on. 
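+Before diving into the examples below, here is a hypothetical end-to-end session combining these operations. It assumes the async client exposes a `client.branch` manager with `create`, `all`, `rebase`, and `merge` methods; see the linked examples below for the authoritative usage.
+
+```python
+import asyncio
+
+from infrahub_sdk import InfrahubClient
+
+
+async def main() -> None:
+    client = await InfrahubClient.init(address="http://localhost:8000")
+
+    # Create a branch, then list all branches (assumed manager methods)
+    await client.branch.create(branch_name="update-tags", data_only=True)
+    for name in await client.branch.all():
+        print(name)
+
+    # Rebase the branch on main, then merge it back
+    await client.branch.rebase(branch_name="update-tags")
+    await client.branch.merge(branch_name="update-tags")
+
+
+asyncio.run(main())
+```
+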
## List all the branches :::code source="../../python_sdk/examples/branch_list.py" ::: - -## Create a Branch +## Create a branch :::code source="../../python_sdk/examples/branch_create.py" ::: - ## Rebase a branch :::code source="../../python_sdk/examples/branch_rebase.py" ::: - ## Merge a branch :::code source="../../python_sdk/examples/branch_merge.py" ::: diff --git a/docs/python-sdk/15_create_update_delete.md b/docs/python-sdk/create-update-delete.md similarity index 95% rename from docs/python-sdk/15_create_update_delete.md rename to docs/python-sdk/create-update-delete.md index 9451451b65..7b8ff8d453 100644 --- a/docs/python-sdk/15_create_update_delete.md +++ b/docs/python-sdk/create-update-delete.md @@ -1,15 +1,17 @@ --- -label: Create & Update Nodes +label: Create and update nodes layout: default order: 650 --- +# Create and update nodes ## Create +++ Async -#### Method 1 +### Method 1 + ```python from infrahub_sdk import InfrahubClient @@ -20,7 +22,8 @@ await obj.save() print(f"New user created with the Id {obj.id}") ``` -#### Method 2 +### Method 2 + ```python from infrahub_sdk import InfrahubClient @@ -31,7 +34,9 @@ print(f"New user created with the Id {obj.id}") ``` +++ Sync -#### Method 1 + +### Method 1 + ```python from infrahub_sdk import InfrahubClientSync @@ -41,7 +46,9 @@ obj = client.create(kind="CoreAccount", data=data) obj.save() print(f"New user created with the Id {obj.id}") ``` -#### Method 2 + +### Method 2 + ```python from infrahub_sdk import InfrahubClientSync @@ -50,10 +57,11 @@ obj = client.create(kind="CoreAccount", name="janedoe", label="Jane Doe", type=" obj.save() print(f"New user created with the Id {obj.id}") ``` -+++ ++++ ## Update + +++ Async ```python @@ -75,8 +83,8 @@ obj = client.get(kind="CoreAccount", name__value="admin") obj.label.value = "Administrator" obj.save() ``` -+++ ++++ ## Delete @@ -89,7 +97,9 @@ client = await InfrahubClient.init(address="http://localhost:8000") obj = await client.get(kind="CoreAccount", name__value="johndoe") await obj.delete() ``` + +++ Sync + ```python from infrahub_sdk import InfrahubClientSync @@ -97,5 +107,5 @@ client = InfrahubClientSync.init(address="http://localhost:8000") obj = client.get(kind="CoreAccount", name__value="johndoe") obj.delete() ``` -+++ ++++ diff --git a/docs/python-sdk/10_query.md b/docs/python-sdk/query.md similarity index 97% rename from docs/python-sdk/10_query.md rename to docs/python-sdk/query.md index b1c8a2f8b4..831ce1d0af 100644 --- a/docs/python-sdk/10_query.md +++ b/docs/python-sdk/query.md @@ -1,8 +1,9 @@ --- -label: Query Data from Infrahub +label: Query data from Infrahub layout: default order: 800 --- +# Query data from Infrahub The Python SDK has 3 main methods to query data from Infrahub @@ -12,51 +13,59 @@ The Python SDK has 3 main methods to query data from Infrahub > It's also possible to execute a GraphQL query directly with `client.execute_graphql()` - +++ Async + ```python from infrahub_sdk import InfrahubClient client = await InfrahubClient.init(address="http://localhost:8000") accounts = await client.all(kind="CoreAccount") ``` + ```python from infrahub_sdk import InfrahubClient client = await InfrahubClient.init(address="http://localhost:8000") accounts = await client.get(kind="CoreAccount", id="XXX") ``` + ```python from infrahub_sdk import InfrahubClient client = await InfrahubClient.init(address="http://localhost:8000") accounts = await client.filters(kind="CoreAccount", name__value="admin") ``` + +++ Sync + ```python from infrahub_sdk import InfrahubClientSync 
client = InfrahubClientSync.init(address="http://localhost:8000") accounts = client.all(kind="CoreAccount") ``` + ```python from infrahub_sdk import InfrahubClientSync client = InfrahubClientSync.init(address="http://localhost:8000") accounts = client.get(kind="CoreAccount", id="XXX") ``` + ```python from infrahub_sdk import InfrahubClientSync client = InfrahubClientSync.init(address="http://localhost:8000") accounts = client.filters(kind="CoreAccount", name__value="admin") ``` + +++ All 3 methods will return an `InfrahubNode` object or a list of `InfrahubNode` objects. All Attributes and Relationships defined in the schema will be automatically built into the object when it's being initialized. +++ Async + ```python from infrahub_sdk import InfrahubClient @@ -64,7 +73,9 @@ client = await InfrahubClient.init(address="http://localhost:8000") account = await client.get(kind="CoreRepository", id="XXX") print(account.name.value) ``` + +++ Sync + ```python from infrahub_sdk import InfrahubClientSync @@ -72,8 +83,8 @@ client = InfrahubClientSync.init(address="http://localhost:8000") account = client.get(kind="CoreRepository", id="XXX") print(account.name.value) ``` -+++ ++++ ## Control what will be queried @@ -83,24 +94,27 @@ By default the query will include, the attributes, the relationships of cardinal it's possible to add or remove some attributes and/or relationships from the query with `include` and `exclude` - +++ Async + ```python from infrahub_sdk import InfrahubClient client = await InfrahubClient.init(address="http://localhost:8000") accounts = await client.all(kind="CoreRepository", exclude=["tags"], include=["queries"]) ``` + +++ Sync + ```python from infrahub_sdk import InfrahubClientSync client = InfrahubClientSync.init(address="http://localhost:8000") accounts = client.all(kind="CoreRepository", exclude=["tags"], include=["queries"]) ``` + +++ -## Managing Relationships +## Managing relationships There are situations to consider while managing relationships via the SDK, depending if the relationship was included in the query or not. @@ -109,6 +123,7 @@ Relationships that are included in a query will be automatically `initialized` w In both cases, you can `fetch()` all the peers of a relationship +++ Async + ```python from infrahub_sdk import InfrahubClient, InfrahubNode @@ -117,7 +132,9 @@ account: InfrahubNode = await client.get(kind="CoreRepository", id="XXXX") await account.tags.fetch() tags: List[InfrahubNode] = [ tag.peer for tag in account.tags ] ``` + +++ Sync + ```python from infrahub_sdk import InfrahubClientSync, InfrahubNodeSync @@ -126,4 +143,5 @@ account: InfrahubNodeSync = client.get(kind="CoreRepository", id="XXXX") account.tags.fetch() tags: List[InfrahubNodeSync] = [ tag.peer for tag in account.tags ] ``` -+++ \ No newline at end of file + ++++ diff --git a/docs/python-sdk/readme.md b/docs/python-sdk/readme.md index e2c269ecf4..f3046a6793 100644 --- a/docs/python-sdk/readme.md +++ b/docs/python-sdk/readme.md @@ -4,58 +4,62 @@ A Python SDK for Infrahub greatly simplifies how we can interact with Infrahub p ## Installation -> The Python SDK is currently hosted in the same repository as Infrahub, but once both reaches a better maturity state, the plan is to make it easy to install the SDK as a stand alone package. +The Infrahub SDK for Python is available on [PyPI](https://pypi.org/project/infrahub-sdk/) and can be installed using the `pip` package installer. It is recommended to install the SDK into a virtual environment. 
-For now, the recommendation is to clone the main Infrahub repository on your file system and to install the entire infrahub package in your own repository using a relative path with the `--editable` flag.
-
-```
-poetry add --editable 
+```sh
+python3 -m venv .venv
+source .venv/bin/activate
+pip install infrahub-sdk
+```
-## Getting Started
+## Getting started
-The SDK supports both synchronous and asynchronous Python. The default asynchronous version is provided by the `InfrahubClient` class while the synchronous version is using the `InfrahubClientSync` class.
+The SDK supports both synchronous and asynchronous Python. The default asynchronous version is provided by the `InfrahubClient` class while the synchronous version uses the `InfrahubClientSync` class.
+### Dynamic schema discovery
-### Dynamic Schema Discovery
-
-By default, the Python client will automatically gather the active schema from Infrahub and all methods will be autogenerated based on that.
+By default, the Python client will automatically gather the active schema from Infrahub and all methods will be autogenerated based on it.
+++ Async
+
```python
from infrahub_sdk import InfrahubClient

client = await InfrahubClient.init(address="http://localhost:8000")
```
+
+++ Sync
+
```python
from infrahub_sdk import InfrahubClientSync

client = InfrahubClientSync.init(address="http://localhost:8000")
```
-+++
++++
### Authentication
-The SDK is using a Token based authentication method to authenticate with the API and GraphQL
+The SDK is using a token-based authentication method to authenticate with the API and GraphQL.
-The token can either be provided with `config=Config(api_token="TOKEN")` at initialization time or it can be automatically retrieved
-from the environment variable `INFRAHUB_SDK_API_TOKEN`
+The token can either be provided with `config=Config(api_token="TOKEN")` at initialization time or it can be retrieved automatically from the environment variable `INFRAHUB_SDK_API_TOKEN`.
-> In the demo environment the default token for the Admin account is `06438eb2-8019-4776-878c-0941b1f1d1ec`
+> In the demo environment, the default token for the Admin account is `06438eb2-8019-4776-878c-0941b1f1d1ec`.
+++ Async
+
```python
from infrahub_sdk import InfrahubClient, Config

client = await InfrahubClient.init(config=Config(api_token="TOKEN"))
```
+
+++ Sync
+
```python
from infrahub_sdk import InfrahubClientSync, Config

client = InfrahubClientSync.init(config=Config(api_token="TOKEN"))
```
-+++
++++
diff --git a/docs/readme.md b/docs/readme.md
index 98488ad59f..bf5046234b 100644
--- a/docs/readme.md
+++ b/docs/readme.md
@@ -1,13 +1,22 @@
+---
+label: "Home"
+icon: "home"
+---
+
![](./media/Infrahub-horizontal.svg)
+
+
+# Welcome to Infrahub
Infrahub is taking a new approach to Infrastructure Automation by providing a new generation of datastore to organize and control all the data that defines how an infrastructure should run.
At its heart, Infrahub is built on 3 fundamental pillars + - **Powerful Schema**, easily extensible - **Unified Version Control** for Data and Files - **Data Synchronization** with Traceability and Ownership -# Getting Started +## Getting started - Deploy the demo environment on your laptop - Deploy an instance of Infrahub in the Cloud with GitPod @@ -15,11 +24,12 @@ At its heart, Infrahub is built on 3 fundamental pillars - Explore the capabilities of the schema - Check the architecture document to understand how the application is built -# Project Status +## Project status The project is currently in a Tech Preview phase, which means that not all features we are targeting for the first major release have been implemented yet and the project is still evolving at a very rapid pace with the (un)stability that you would expect in an early project. Having said that there are several features already available and you should be able already to: + - Manage branches and query any branches - Integrate a Git repository within Infrahub - Integrate and render Jinja2 templates @@ -31,8 +41,7 @@ Having said that there are several features already available and you should be - Branch diff Missing features we are actively working on. + - Object Profile and Inheritance - Pull requests - Branches support for the schema and schema migration - - diff --git a/docs/components/api-server/index.yml b/docs/reference/api-server/index.yml similarity index 83% rename from docs/components/api-server/index.yml rename to docs/reference/api-server/index.yml index 3f935801c1..3febb5656b 100644 --- a/docs/components/api-server/index.yml +++ b/docs/reference/api-server/index.yml @@ -2,4 +2,4 @@ label: API Server # icon: "key-asterisk" icon: alert -order: 900 +order: 700 diff --git a/docs/components/api-server/readme.md b/docs/reference/api-server/readme.md similarity index 86% rename from docs/components/api-server/readme.md rename to docs/reference/api-server/readme.md index d4fda815ae..bbefe2cd6c 100644 --- a/docs/components/api-server/readme.md +++ b/docs/reference/api-server/readme.md @@ -1,6 +1,6 @@ -# API Server +# API server !!!warning Under Construction This page is still under construction and is not available yet.
-Please reach out in Slack if you have some questions about the **API Server** +Please reach out in Slack if you have some questions about the **API server** !!! \ No newline at end of file diff --git a/docs/installation/configuration.md b/docs/reference/configuration.md similarity index 85% rename from docs/installation/configuration.md rename to docs/reference/configuration.md index 8b661500d4..667e9e97f3 100644 --- a/docs/installation/configuration.md +++ b/docs/reference/configuration.md @@ -1,17 +1,23 @@ --- -label: Configuration File +label: Configuration file layout: default +icon: tools order: 900 --- -# Configuration File +# Configuration file + !!!warning Under Construction + This page is still under construction and is not available yet.
-Please reach out in Slack if you have some questions about the **Configuration File** +Please reach out in Slack if you have some questions about the **Configuration file** + !!! Until a better documentation is available, the best reference to understand what options are available in the configuration file is the code itself. The configuration file format is defined in Pydantic models in `infrahub/config.py` ==- Explore the Source Code for the configuration File + :::code source="../../backend/infrahub/config.py" ::: -==- \ No newline at end of file + +==- diff --git a/docs/components/git-agent/index.yml b/docs/reference/git-agent/index.yml similarity index 100% rename from docs/components/git-agent/index.yml rename to docs/reference/git-agent/index.yml diff --git a/docs/components/git-agent/readme.md b/docs/reference/git-agent/readme.md similarity index 86% rename from docs/components/git-agent/readme.md rename to docs/reference/git-agent/readme.md index eecf90fea3..a21c1ad0b4 100644 --- a/docs/components/git-agent/readme.md +++ b/docs/reference/git-agent/readme.md @@ -1,6 +1,6 @@ -# Git Agent +# Git agent !!!warning Under Construction This page is still under construction and is not available yet.
-Please reach out in Slack if you have some questions about the **Git Agent** +Please reach out in Slack if you have some questions about the **Git agent** !!! \ No newline at end of file diff --git a/docs/reference/index.yml b/docs/reference/index.yml new file mode 100644 index 0000000000..f2d1c70ec4 --- /dev/null +++ b/docs/reference/index.yml @@ -0,0 +1,4 @@ +--- +label: Reference +icon: info +order: 700 diff --git a/docs/components/infrahub-cli/index.yml b/docs/reference/infrahub-cli/index.yml similarity index 80% rename from docs/components/infrahub-cli/index.yml rename to docs/reference/infrahub-cli/index.yml index d9c6140324..6c63288a32 100644 --- a/docs/components/infrahub-cli/index.yml +++ b/docs/reference/infrahub-cli/index.yml @@ -1,4 +1,4 @@ --- label: infrahub cli icon: "diff-ignored" -order: 700 +order: 900 diff --git a/docs/components/infrahub-cli/infrahub-db.md b/docs/reference/infrahub-cli/infrahub-db.md similarity index 94% rename from docs/components/infrahub-cli/infrahub-db.md rename to docs/reference/infrahub-cli/infrahub-db.md index 09740f00f9..05fc21f891 100644 --- a/docs/components/infrahub-cli/infrahub-db.md +++ b/docs/reference/infrahub-cli/infrahub-db.md @@ -36,7 +36,7 @@ $ infrahub db init [OPTIONS] ## `infrahub db load-test-data` -Load test data into the database from the test_data directory. +Load test data into the database from the `test_data` directory. **Usage**: diff --git a/docs/components/infrahub-cli/infrahub-git-agent.md b/docs/reference/infrahub-cli/infrahub-git-agent.md similarity index 91% rename from docs/components/infrahub-cli/infrahub-git-agent.md rename to docs/reference/infrahub-cli/infrahub-git-agent.md index d52dbdd481..0cfab51458 100644 --- a/docs/components/infrahub-cli/infrahub-git-agent.md +++ b/docs/reference/infrahub-cli/infrahub-git-agent.md @@ -34,7 +34,6 @@ $ infrahub git-agent start [OPTIONS] [PORT] **Options**: -* `--interval INTEGER`: Interval in sec between remote repositories update. [default: 10] * `--debug / --no-debug`: Enable advanced logging and troubleshooting [default: no-debug] * `--config-file TEXT`: Location of the configuration file to use for Infrahub [env var: INFRAHUB_CONFIG; default: infrahub.toml] * `--help`: Show this message and exit. diff --git a/docs/components/infrahub-cli/infrahub-server.md b/docs/reference/infrahub-cli/infrahub-server.md similarity index 100% rename from docs/components/infrahub-cli/infrahub-server.md rename to docs/reference/infrahub-cli/infrahub-server.md diff --git a/docs/reference/readme.md b/docs/reference/readme.md new file mode 100644 index 0000000000..3c2b03b933 --- /dev/null +++ b/docs/reference/readme.md @@ -0,0 +1,9 @@ +# Reference + +Technical reference documents for Infrahub API and components. + +- [Schema](schema/) +- [Infrahub CLI](infrahub-cli/infrahub-db/) +- [Configuration file](configuration.md) +- [Git agent](git-agent/) +- [API server](api-server/) \ No newline at end of file diff --git a/docs/reference/schema/attribute.md b/docs/reference/schema/attribute.md new file mode 100644 index 0000000000..3168314837 --- /dev/null +++ b/docs/reference/schema/attribute.md @@ -0,0 +1,240 @@ +--- +label: Attribute +layout: default +order: 800 +--- + + + + +# Attribute + +In a schema file, an attribute can be defined inside a `node`, a `generic` or a `node extension`. 
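+For instance, attributes attached to a generic use the same structure as on a node. The snippet below is a minimal, hypothetical sketch assuming the `generics` top-level key described on the [Generic](generic.md) page; the model and attribute names are illustrative only.
+
+```yaml
+---
+generics:
+  - name: Interface
+    namespace: Infra
+    attributes:
+      - name: description
+        kind: Text
+        optional: true
+```
+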
+
+## Summary
+
+Below is the list of all available options to define an Attribute in the schema.
+
| Name | Type | Description | Mandatory | { class="compact" }
| ---- | ---- | ---- | --------- |
| [**branch**](#branch) | Attribute | Type of branch support for the attribute, if not defined it will be inherited from the node. | False |
| [**choices**](#choices) | Attribute | Define a list of valid choices for a dropdown attribute. | False |
| [**default_value**](#default_value) | Attribute | Default value of the attribute. | False |
| [**description**](#description) | Attribute | Short description of the attribute. | False |
| [**enum**](#enum) | Attribute | Define a list of valid values for the attribute. | False |
| [**kind**](#kind) | Attribute | Defines the type of the attribute. | True |
| [**label**](#label) | Attribute | Human friendly representation of the name. Will be autogenerated if not provided | False |
| [**max_length**](#max_length) | Attribute | Set a maximum number of characters allowed for a given attribute. | False |
| [**min_length**](#min_length) | Attribute | Set a minimum number of characters allowed for a given attribute. | False |
| [**name**](#name) | Attribute | Attribute name, must be unique within a model and must be all lowercase. | True |
| [**optional**](#optional) | Attribute | Indicate if this attribute is mandatory or optional. | False |
| [**order_weight**](#order_weight) | Attribute | Number used to order the attribute in the frontend (table and view). | False |
| [**read_only**](#read_only) | Attribute | Set the attribute as Read-Only; users won't be able to change its value. Mainly relevant for internal objects. | False |
| [**regex**](#regex) | Attribute | Regex used to limit the characters allowed for the attribute. | False |
| [**unique**](#unique) | Attribute | Indicate if the value of this attribute must be unique in the database for a given model. | False |
+
+## Example
+
+```yaml
+nodes:
+  - name: Rack
+    attributes:
+      - name: name
+        kind: Text
+        unique: True
+        description: Unique identifier for the rack
+extensions:
+  nodes:
+    - kind: CoreProposedChange
+      attributes:
+        - name: ticket_id
+          kind: Text
+          unique: True
+          optional: False
+          description: Internal Ticket ID from Service Now
+```
+
+## Reference Guide
+
+### branch
+
| Key | Value | { class="compact" }
| ---- | --------------- |
| **Name** | branch |
| **Kind** | `Text` |
| **Description** | Type of branch support for the attribute, if not defined it will be inherited from the node. |
| **Optional** | True |
| **Default Value** | |
| **Constraints** | |
| **Accepted Values** | `aware` `agnostic` `local` |
+
+### choices
+
| Key | Value | { class="compact" }
| ---- | --------------- |
| **Name** | choices |
| **Kind** | `List` |
| **Description** | Define a list of valid choices for a dropdown attribute. |
| **Optional** | True |
| **Default Value** | |
| **Constraints** | |
+
+### default_value
+
| Key | Value | { class="compact" }
| ---- | --------------- |
| **Name** | default_value |
| **Kind** | `Any` |
| **Description** | Default value of the attribute. |
| **Optional** | True |
| **Default Value** | |
| **Constraints** | |
+
+### description
+
| Key | Value | { class="compact" }
| ---- | --------------- |
| **Name** | description |
| **Kind** | `Text` |
| **Description** | Short description of the attribute. |
| +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 128 | + + +### enum + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | enum | +| **Kind** | `List` | +| **Description** | Define a list of valid values for the attribute. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + + +### kind + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | kind | +| **Kind** | `Text` | +| **Description** | Defines the type of the attribute. | +| **Optional** | False | +| **Default Value** | | +| **Constraints** | Length: min 3, max 32 | +| **Accepted Values** | `ID` `Dropdown` `Text` `TextArea` `DateTime` `Email` `Password` `HashedPassword` `URL` `File` `MacAddress` `Color` `Number` `Bandwidth` `IPHost` `IPNetwork` `Checkbox` `List` `JSON` `Any` `String` `Integer` `Boolean` | + +### label + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | label | +| **Kind** | `Text` | +| **Description** | Human friendly representation of the name. Will be autogenerated if not provided | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 32 | + + +### max_length + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | max_length | +| **Kind** | `Number` | +| **Description** | Set a maximum number of characters allowed for a given attribute. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### min_length + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | min_length | +| **Kind** | `Number` | +| **Description** | Set a minimum number of characters allowed for a given attribute. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### name + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | name | +| **Kind** | `Text` | +| **Description** | Attribute name, must be unique within a model and must be all lowercase. | +| **Optional** | False | +| **Default Value** | | +| **Constraints** | Regex: `^[a-z0-9\_]+$`
 Length: min 3, max 32 |
+
+
+### optional
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | optional |
+| **Kind** | `Boolean` |
+| **Description** | Indicate if this attribute is mandatory or optional. |
+| **Optional** | True |
+| **Default Value** | True |
+| **Constraints** | |
+
+
+### order_weight
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | order_weight |
+| **Kind** | `Number` |
+| **Description** | Number used to order the attribute in the frontend (table and view). |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | |
+
+
+### read_only
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | read_only |
+| **Kind** | `Boolean` |
+| **Description** | Set the attribute as Read-Only; users won't be able to change its value. Mainly relevant for internal objects. |
+| **Optional** | True |
+| **Default Value** | False |
+| **Constraints** | |
+
+
+### regex
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | regex |
+| **Kind** | `Text` |
+| **Description** | Regex used to limit the characters allowed for the attribute. |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | |
+
+
+### unique
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | unique |
+| **Kind** | `Boolean` |
+| **Description** | Indicate if the value of this attribute must be unique in the database for a given model. |
+| **Optional** | True |
+| **Default Value** | False |
+| **Constraints** | |
+
+
+
diff --git a/docs/reference/schema/generic.md b/docs/reference/schema/generic.md
new file mode 100644
index 0000000000..53d3561899
--- /dev/null
+++ b/docs/reference/schema/generic.md
@@ -0,0 +1,194 @@
+---
+label: Generic
+layout: default
+order: 600
+---
+
+
+
+# Generic
+
+## Summary
+
+Below is the list of all available options to define a Generic in the schema.
+
+| Name | Type | Description | Mandatory | { class="compact" }
+| ---- | ---- | ---- | --------- |
+| [**branch**](#branch) | Attribute | Type of branch support for the model. | False |
+| [**default_filter**](#default_filter) | Attribute | Default filter used to search for a node in addition to its ID. | False |
+| [**description**](#description) | Attribute | Short description of the Generic. | False |
+| [**display_labels**](#display_labels) | Attribute | List of attributes to use to generate the display label | False |
+| [**icon**](#icon) | Attribute | Defines the icon to be used for this object type. | False |
+| [**include_in_menu**](#include_in_menu) | Attribute | Defines if objects of this kind should be included in the menu. | False |
+| [**label**](#label) | Attribute | Human friendly representation of the name/kind | False |
+| [**menu_placement**](#menu_placement) | Attribute | Defines where in the menu this object should be placed. | False |
+| [**name**](#name) | Attribute | Generic name, must be unique within a namespace and must start with an uppercase letter. | True |
+| [**namespace**](#namespace) | Attribute | Namespace of the Generic. Namespaces are used to organize models into logical groups and to prevent name collisions. 
| True | +| [**order_by**](#order_by) | Attribute | List of attributes to use to order the results by default | False | +| [**used_by**](#used_by) | Attribute | List of Nodes that are referencing this Generic | False | +| [**attributes**](#attributes) | Relationship | | False | +| [**relationships**](#relationships) | Relationship | | False | + +## Reference Guide +### branch + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | branch | +| **Kind** | `Text` | +| **Description** | Type of branch support for the model. | +| **Optional** | True | +| **Default Value** | aware | +| **Constraints** | | +| **Accepted Values** | `aware` `agnostic` `local` | + +### default_filter + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | default_filter | +| **Kind** | `Text` | +| **Description** | Default filter used to search for a node in addition to its ID. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Regex: `^[a-z0-9\_]+$` | + + +### description + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | description | +| **Kind** | `Text` | +| **Description** | Short description of the Generic. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 128 | + + +### display_labels + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | display_labels | +| **Kind** | `List` | +| **Description** | List of attributes to use to generate the display label | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### icon + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | icon | +| **Kind** | `Text` | +| **Description** | Defines the icon to be used for this object type. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### include_in_menu + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | include_in_menu | +| **Kind** | `Boolean` | +| **Description** | Defines if objects of this kind should be included in the menu. | +| **Optional** | True | +| **Default Value** | True | +| **Constraints** | | + + +### label + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | label | +| **Kind** | `Text` | +| **Description** | Human friendly representation of the name/kind | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 32 | + + +### menu_placement + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | menu_placement | +| **Kind** | `Text` | +| **Description** | Defines where in the menu this object should be placed. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### name + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | name | +| **Kind** | `Text` | +| **Description** | Generic name, must be unique within a namespace and must start with an uppercase letter. | +| **Optional** | False | +| **Default Value** | | +| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
 Length: min 2, max 32 |
+
+
+### namespace
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | namespace |
+| **Kind** | `Text` |
+| **Description** | Namespace of the Generic. Namespaces are used to organize models into logical groups and to prevent name collisions. |
+| **Optional** | False |
+| **Default Value** | |
+| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
 Length: min 3, max 32 |
+
+
+### order_by
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | order_by |
+| **Kind** | `List` |
+| **Description** | List of attributes to use to order the results by default |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | |
+
+
+### used_by
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | used_by |
+| **Kind** | `List` |
+| **Description** | List of Nodes that are referencing this Generic |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | |
+
+
+
+## attributes
+
+| -- | -- | { class="compact" }
+| ---- | --------------- |
+| **Name** | attributes |
+| **Kind** | `List` |
+| **Description** | |
+
+## relationships
+
+| -- | -- | { class="compact" }
+| ---- | --------------- |
+| **Name** | relationships |
+| **Kind** | `List` |
+| **Description** | |
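+
+## Example
+
+A minimal sketch of a generic and of a node inheriting from it, based on the `Car` example used in the schema documentation; the `Test` namespace is illustrative:
+
+```yaml
+generics:
+  - name: Car
+    namespace: Test
+    attributes:
+      - name: model
+        kind: Text
+nodes:
+  - name: ElectricCar
+    namespace: Test
+    inherit_from:
+      - TestCar
+```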
diff --git a/docs/schema/index.yml b/docs/reference/schema/index.yml
similarity index 72%
rename from docs/schema/index.yml
rename to docs/reference/schema/index.yml
index 2bf31b5703..a5a1799be2 100644
--- a/docs/schema/index.yml
+++ b/docs/reference/schema/index.yml
@@ -1,4 +1,4 @@
 ---
 label: Schema
 icon: "repo"
-order: 700
+order: 1000
diff --git a/docs/reference/schema/node-extension.md b/docs/reference/schema/node-extension.md
new file mode 100644
index 0000000000..1a17232458
--- /dev/null
+++ b/docs/reference/schema/node-extension.md
@@ -0,0 +1,48 @@
+---
+label: Node Extension
+layout: default
+order: 850
+---
+
+# Node Extension
+
+## Summary
+
+Below is the list of all available options to define a node extension in the schema.
+
+| Name | Type | Description | Mandatory | { class="compact" }
+| ---- | ---- | ----------- | --------- |
+| [**kind**](#kind) | Attribute | Node to extend. | True |
+| [**attributes**](#attributes) | Relationship | List of Attribute to add to the Node. | True |
+| [**relationships**](#relationships) | Relationship | List of Relationship to add to the Node. | True |
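+
+## Example
+
+A minimal sketch of a node extension; it reuses the `CoreProposedChange` extension shown in the attribute documentation:
+
+```yaml
+extensions:
+  nodes:
+    - kind: CoreProposedChange
+      attributes:
+        - name: ticket_id
+          kind: Text
+          unique: True
+          optional: False
+```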
+
+## Reference Guide
+### kind
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | kind |
+| **Kind** | `Text` |
+| **Description** | Node kind, must exist in the schema and must be in CamelCase. |
+| **Optional** | False |
+| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
 Length: min 2, max 32 |
+
+### attributes
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | attributes |
+| **Kind** | `List` |
+| **Description** | List of Attribute to add to the Node. |
+
+### relationships
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | relationships |
+| **Kind** | `List` |
+| **Description** | List of Relationship to add to the Node. |
diff --git a/docs/reference/schema/node.md b/docs/reference/schema/node.md
new file mode 100644
index 0000000000..c346590a87
--- /dev/null
+++ b/docs/reference/schema/node.md
@@ -0,0 +1,207 @@
+---
+label: Node
+layout: default
+order: 900
+---
+
+
+
+# Node
+
+## Summary
+
+Below is the list of all available options to define a Node in the schema.
+
+| Name | Type | Description | Mandatory | { class="compact" }
+| ---- | ---- | ----------- | --------- |
+| [**branch**](#branch) | Attribute | Type of branch support for the model. | False |
+| [**default_filter**](#default_filter) | Attribute | Default filter used to search for a node in addition to its ID. | False |
+| [**description**](#description) | Attribute | Short description of the model; it will be visible in the frontend. | False |
+| [**display_labels**](#display_labels) | Attribute | List of attributes to use to generate the display label | False |
+| [**groups**](#groups) | Attribute | List of Group that this Node is part of. | False |
+| [**icon**](#icon) | Attribute | Defines the icon to be used for this object type. | False |
+| [**include_in_menu**](#include_in_menu) | Attribute | Defines if objects of this kind should be included in the menu. | False |
+| [**inherit_from**](#inherit_from) | Attribute | List of Generic Kind that this node is inheriting from | False |
+| [**label**](#label) | Attribute | Human friendly representation of the name/kind | False |
+| [**menu_placement**](#menu_placement) | Attribute | Defines where in the menu this object should be placed. | False |
+| [**name**](#name) | Attribute | Node name, must be unique within a namespace and must start with an uppercase letter. | True |
+| [**namespace**](#namespace) | Attribute | Namespace of the Node. Namespaces are used to organize models into logical groups and to prevent name collisions. | True |
+| [**order_by**](#order_by) | Attribute | List of attributes to use to order the results by default | False |
+| [**attributes**](#attributes) | Relationship | List of supported Attributes for the Node. | False |
+| [**relationships**](#relationships) | Relationship | List of supported Relationships for the Node. | False |
+
+## Reference Guide
+### branch
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | branch |
+| **Kind** | `Text` |
+| **Description** | Type of branch support for the model. |
+| **Optional** | True |
+| **Default Value** | aware |
+| **Constraints** | |
+| **Accepted Values** | `aware` `agnostic` `local` |
+
+### default_filter
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | default_filter |
+| **Kind** | `Text` |
+| **Description** | Default filter used to search for a node in addition to its ID. |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | Regex: `^[a-z0-9\_]+$` |
+
+
+### description
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | description |
+| **Kind** | `Text` |
+| **Description** | Short description of the model; it will be visible in the frontend. 
| +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 128 | + + +### display_labels + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | display_labels | +| **Kind** | `List` | +| **Description** | List of attributes to use to generate the display label | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### groups + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | groups | +| **Kind** | `List` | +| **Description** | List of Group that this Node is part of. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### icon + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | icon | +| **Kind** | `Text` | +| **Description** | Defines the icon to be used for this object type. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### include_in_menu + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | include_in_menu | +| **Kind** | `Boolean` | +| **Description** | Defines if objects of this kind should be included in the menu. | +| **Optional** | True | +| **Default Value** | True | +| **Constraints** | | + + +### inherit_from + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | inherit_from | +| **Kind** | `List` | +| **Description** | List of Generic Kind that this node is inheriting from | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### label + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | label | +| **Kind** | `Text` | +| **Description** | Human friendly representation of the name/kind | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 32 | + + +### menu_placement + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | menu_placement | +| **Kind** | `Text` | +| **Description** | Defines where in the menu this object should be placed. | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### name + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | name | +| **Kind** | `Text` | +| **Description** | Node name, must be unique within a namespace and must start with an uppercase letter. | +| **Optional** | False | +| **Default Value** | | +| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
 Length: min 2, max 32 |
+
+
+### namespace
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | namespace |
+| **Kind** | `Text` |
+| **Description** | Namespace of the Node. Namespaces are used to organize models into logical groups and to prevent name collisions. |
+| **Optional** | False |
+| **Default Value** | |
+| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Length: min 3, max 32 | + + +### order_by + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | order_by | +| **Kind** | `List` | +| **Description** | List of attributes to use to order the results by default | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + + +## attributes + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | attributes | +| **Kind** | `List` | +| **Description** | List of supported Attributes for the Node. | + +## relationships + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | relationships | +| **Kind** | `List` | +| **Description** | List of supported Relationships for the Node. | + diff --git a/docs/schema/readme.md b/docs/reference/schema/readme.md similarity index 55% rename from docs/schema/readme.md rename to docs/reference/schema/readme.md index fc16696fd8..bba4d4c07a 100644 --- a/docs/schema/readme.md +++ b/docs/reference/schema/readme.md @@ -1,24 +1,25 @@ # Schema -In Infrahub, the schema is at the center of most things and our goal is to provide as much flexibility as possible to the users to extend and customize the schema. +In Infrahub, the schema is at the center of most things and our goal is to provide as much flexibility as possible to allow users to extend and customize the schema. -Out of the box, Infrahub doesn't have a schema for most things and it's up to the users to load a schema that fits their needs. Over time we are planning to maintain different schemas for the common type of use cases, but for now, we are providing one example schema to model a simple network with basic objects like Device, Interface, IPAddress etc +Out of the box, Infrahub doesn't have a schema for most things and it's up to users to load a schema that fits their needs. Over time we plan to maintain different schemas for the common types of use cases, but for now, we are providing one example schema to model a basic network with objects like Device, Interface, IPAddress, etc. -Unlike traditional databases that can only have one schema at a time, in Infrahub, it is possible to have a different schema per branch. This is possible because the schema itself is stored in the database like any other object. +Unlike traditional databases that can only have one schema at a time, in Infrahub it is possible to have a different schema per branch. This is possible because the schema itself is stored in the database like any other object. New schema can be uploaded via the `infrahubctl schema load` command or via the REST API directly. !!!info -In the Tech Preview not all features of the schema are available yet, there are still some important changes coming like the support for schema migration and schema dependencies. +In the Tech Preview not all features of the schema are available yet. There are still some important changes coming like support for schema migration and schema dependencies. !!! ## Namespace, Node, Attributes, Relationships & Generics -The schema is composed of 4 primary types of object: [!badge Nodes] that are themselves composed of [!badge Attributes] and [!badge Relationships] and finally [!badge Generics] -- A [!badge Node] in Infrahub represents a `Model`. -- An [!badge Attribute] represents a direct value associated with a [!badge Node] like a `Text`, a `Number` etc ... -- A [!badge Relationship] represents a unidirectional link between 2 [!badge Node], a [!badge Relationship] can be of cardinality `one` or `many`. 
-- A [!badge Generics] can be used to share some attributes between multiple [!badge Node], if you're familiar with programming concept, it's close to class inheritance.
+The schema is composed of 4 primary types of objects: `Nodes`, which are themselves composed of `Attributes` and `Relationships`, and finally `Generics`.
+
+- A `Node` in Infrahub represents a `Model`.
+- An `Attribute` represents a direct value associated with a `Node`, like a `Text` or a `Number`.
+- A `Relationship` represents a link between 2 `Node`; a `Relationship` can be of cardinality `one` or `many`.
+- A `Generic` can be used to share some attributes between multiple `Node`; if you're familiar with programming concepts, it's close to class inheritance.
 
 In the example below, the node `Person` has 2 attributes (`name` and `description`) and the node `Car` has 1 attribute (`model`) and 1 relationship to `Person`, identified by `owner``.
 
@@ -46,41 +47,43 @@ nodes:
     kind: Attribute
 ```
 
-[!badge Node], [!badge Attribute] and [!badge Relationship] are defined by their `kind`. While the name and the namespace of the node are up to the creator of the schema, the kinds for the attributes and the relationships are coming from Infrahub. The `kind` of an attribute, or a relationship, is very important because it defined how each element will be represented in GraphQL and the UI.
+`Node`, `Attribute`, and `Relationship` are defined by their `kind`. While the name and the namespace of the node are up to the creator of the schema, the kinds for the attributes and the relationships come from Infrahub. The `kind` of an attribute, or a relationship, is important because it defines how each element is represented in GraphQL and the UI.
 
 > The `kind` of a model is generated by concatenating the `namespace` and the `name`.
 
-### Attribute Kinds
+### Attribute kinds
+
 - `Text`: Standard Text
 - `Number`: Standard Number
 - `TextArea`: Long-form Text that can span multiple lines
 - `Boolean`: Flag that can be either True or False
 - `DateTime`: A Data and a Time
 - `Email`: Email address
-- `Password`: A Text String that should be offuscated.
+- `Password`: A Text String that should be obfuscated
 - `URL`: An URL to a website or a resource over HTTP
 - `File`: Path to a file on the filesystem
 - `MacAddress`: Mac Address following the format (XX:XX:XX:XX:XX:XX)
-- `Color`: A html color
+- `Color`: An HTML color
 - `Bandwidth`: Bandwidth in kbps
-- `IPHost`: Ip Address in either IPV4 or IPv6 format
-- `IPNetwork`: Ip Network in either IPV4 or IPv6 format
+- `IPHost`: IP Address in either IPv4 or IPv6 format
+- `IPNetwork`: IP Network in either IPv4 or IPv6 format
 - `Checkbox`: Duplicate of `Boolean`
 - `List`: List of any value
 - `JSON`: Any data structure compatible with JSON
 - `Any`: Can be anything
 
-### Relationship Kinds
+### Relationship kinds
 
 - `Generic`: Default relationship without specific significance
 - `Attribute`: Relationship of type Attribute are represented in the detailed view and the list view
-- `Component`: Indicate a relationship with another node that is a component of the current node, Example: Interface is a component to a Device
-- `Parent`: Indicate a relationship with another node that is a parent to the current node, Example: Device is a parent to an Interface
-- `Group`: Indicate a relationship to a member or a subscriber of a group. 
-
-==- Attribute Kinds Behavior in the UI
-| Kind | Display in List View | Display in Detailed View | { class="compact" }
-|--------------|-----------------------|---------------------------|
+- `Component`: Indicates a relationship with another node that is a component of the current node. Example: Interface is a component to a Device
+- `Parent`: Indicates a relationship with another node that is a parent to the current node. Example: Device is a parent to an Interface
+- `Group`: Indicates a relationship to a member or a subscriber of a group
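+
+For instance, the Device and Interface example above can be written as follows; this is a minimal sketch, and the `Infra` namespace and `InfraDevice` kind are illustrative:
+
+```yaml
+nodes:
+  - name: Interface
+    namespace: Infra
+    relationships:
+      - name: device
+        peer: InfraDevice
+        kind: Parent
+        cardinality: one
+```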
+
+==- Attribute kinds behavior in the UI
+{ class="compact" }
+| Kind | Display in List View | Display in Detailed View |
+| ------------ | --------------------- | ------------------------- |
 | `ID` | No | Yes |
 | `Text` | Yes | Yes |
 | `Number` | Yes | Yes |
@@ -99,30 +102,31 @@
 | `List` | No | Yes |
 | `Any` | No | Yes |
 
-==- Relationship Kinds Behavior in the UI
-
-| ID | cardinality | Display in List View | Display in Detailed View | Display in Tab | { class="compact" }
-|-----------|-------------|-----------------------|---------------------------|----------------|
-| `Generic` | `one` | No | Yes | No |
-| `Generic` | `many` | No | No | Yes |
-| `Attribute` | `one` | Yes | Yes | No |
-| `Attribute` | `many` | Yes | Yes | No |
-| `Component` | `one` | No | Yes | No |
-| `Component` | `many` | No | No | Yes |
-| `Parent` | `one` | No | Yes | No |
-| `Parent` | `many` | No | Yes | No |
+==- Relationship kinds behavior in the UI
+{ class="compact" }
+| ID | cardinality | Display in List View | Display in Detailed View | Display in Tab |
+| ----------- | ----------- | --------------------- | ------------------------- | -------------- |
+| `Generic` | `one` | No | Yes | No |
+| `Generic` | `many` | No | No | Yes |
+| `Attribute` | `one` | Yes | Yes | No |
+| `Attribute` | `many` | Yes | Yes | No |
+| `Component` | `one` | No | Yes | No |
+| `Component` | `many` | No | No | Yes |
+| `Parent` | `one` | No | Yes | No |
+| `Parent` | `many` | No | Yes | No |
 ===
 
 ## Generics
 
-A Generic can be used to:
+A generic can be used to:
+
 - Share multiple attributes or relationships between different types of nodes.
 - Connect multiple types of Node to the same relationship.
 - Define Attribute and Relationship on a specific list of nodes and avoid creating attributes for everything
 
-In the example below, we took the schema that we used previously and we refactored it using Generic
-Now `Car` is a Generic with 2 attributes and 1 relationship and 2 models `ElectricCar` and `GazCar` are referencing it.
+In the example below, we took the schema that we used previously and refactored it using a generic.
+Now `Car` is a generic with 2 attributes and 1 relationship, and 2 models, `ElectricCar` and `GazCar`, reference it.
 In the GraphQL schema, `ElectricCar` and `GazCar` will have all the attributes and the relationships of `Car` in addition to the one defined under their respective section.
 
 ```yaml
@@ -171,27 +175,29 @@
 ```
 
-## Branch Support
+## Branch support
+
+By default, all models defined in the schema will be **branch-aware** which means that any changes to an object based on a **branch-aware** model will be local to the branch and will not affect the other branches.
-A model can also be configured as :
+A model can also be configured as:
+
 - **branch agnostic**: All changes to an object based on a **branch agnostic** model will automatically be available in all branches.
 - **branch local**: All changes will stay local to the branch. A model in **branch local** mode will not be affected by the Diff and the Merge.
 
 ### Summary
 
-| Branch Support | Description | Diff | Merge | Rebase |
-|--------------|--------------------------------------------------------------------------------------|------|-------|--------|
-| **Aware** | All changes will be local to the branch and can be merged back into the main branch. | Yes | Yes | Yes |
-| **Agnostic** | All changes will automatically be available in all branches | No | No | No |
-| **Local** | All changes will be local to the branch and will not be merged to other branches. | No | No | Yes |
+{ class="compact" }
+| Branch Support | Description | Diff | Merge | Rebase |
+| -------------- | ------------------------------------------------------------------------------------ | ---- | ----- | ------ |
+| **Aware** | All changes will be local to the branch and can be merged back into the main branch. | Yes | Yes | Yes |
+| **Agnostic** | All changes will automatically be available in all branches | No | No | No |
+| **Local** | All changes will be local to the branch and will not be merged to other branches. | No | No | Yes |
 
+### Branch agnostic
 
-### Branch Agnostic
-In the frontend, the API or the GraphQL endpoint, **branch agnostic** objects can be modified on any branch, no restrictions apply.
+In the frontend, the API, or the GraphQL endpoint, **branch-agnostic** objects can be modified on any branch; no restrictions apply.
 
-To configure a model as **branch agnostic** you need to set the option `branch` to `agnostic` in the schema
+To configure a model as **branch-agnostic**, you need to set the option `branch` to `agnostic` in the schema:
 
 ```yaml
 nodes:
@@ -203,9 +209,9 @@ nodes:
     name: name
 ```
 
-### Attribute and Relationship
+### Attributes and relationships
 
-Attributes and Relationships can be configured as **branch aware**, **branch agnostic** or **branch local** too, independently of the configuration of the model itself using the parameter: `branch`
+Attributes and relationships can be configured as **branch-aware**, **branch-agnostic**, or **branch-local** too, independently of the configuration of the model itself, using the parameter `branch`.
 
 ```yaml
 nodes:
@@ -219,18 +225,21 @@ nodes:
 ```
 
 By default, if a specific value is not defined:
+
 - **attributes** will inherit the configuration of their parent model.
 - **relationships** will become:
-  - **branch agnostic** only if both models, on each end of the relationship, are **branch agnostic**. If either model is **branch aware** the relationship will be set as **branch aware**.
-  - **branch local** if either model, on each end of the relationship, is **branch local**.
+  - **branch-agnostic** only if both models on each end of the relationship are **branch-agnostic**. If either model is **branch-aware**, the relationship will be set as **branch-aware**.
+  - **branch-local** if either model, on each end of the relationship, is **branch-local**.
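+
+For example, in the following sketch (the models are illustrative), the `owner` relationship will be set as **branch-aware**, because `TestPerson` defaults to **branch-aware** even though `TestCar` is **branch-agnostic**:
+
+```yaml
+nodes:
+  - name: Car
+    namespace: Test
+    branch: agnostic
+    relationships:
+      - name: owner
+        peer: TestPerson
+        cardinality: one
+  - name: Person
+    namespace: Test
+    # branch is not set, so this model defaults to branch-aware
+```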
+
+## Schema file
 
-The recommended way to manage and load a schema is to create a schema file in Yaml format, with a schema file it's possible to
+The recommended way to manage and load a schema is to create a schema file in YAML format. With a schema file it's possible to:
+
 - Define new nodes
 - Extend nodes, by adding attributes or relationships to the existing nodes
 
-At a high level, the format of the schema file looks like that.
+At a high level, the format of the schema file looks like the following:
+
 ```yaml
 ---
 version: '1.0'
@@ -242,23 +251,25 @@ extensions:
 ```
 
 ==- Example of schema file that is defining new nodes and adding a relationship to an existing one
-:::code source="../../models/infrastructure_extension_rack.yml" :::
+:::code source="../../../models/infrastructure_extension_rack.yml" :::
 ==-
 
-### Load a Schema file
+### Load a schema file
 
 Schema files can be loaded into Infrahub with the `infrahubctl` command or directly via the Git integration
-
+
 #### infrahubctl command
+
+The `infrahubctl` command can be used to load individual schema files or multiple files as part of a directory.
 
-The `infrahubctl` command can be used to load indivual schema file or multiple files as part of a directory.
-```
+```sh
 infrahubctl schema load
 ```
 
 #### Git integration
 
-The schemas that should be loaded must be declared in the ``.infrahub.yml`` directory, under schemas.
+The schemas that should be loaded must be declared in the ``.infrahub.yml`` file, under schemas.
+
 > Individual files and directory are both supported.
 
 ```yaml
diff --git a/docs/reference/schema/relationship.md b/docs/reference/schema/relationship.md
new file mode 100644
index 0000000000..fa004368eb
--- /dev/null
+++ b/docs/reference/schema/relationship.md
@@ -0,0 +1,180 @@
+---
+label: Relationship
+layout: default
+order: 700
+---
+
+
+
+# Relationship
+
+In a schema file, a relationship can be defined inside a `node`, a `generic` or a `node extension`.
+
+## Summary
+
+Below is the list of all available options to define a Relationship in the schema.
+
+
+| Name | Type | Description | Mandatory | { class="compact" }
+| ---- | ---- | ---- | --------- |
+| [**branch**](#branch) | Attribute | Type of branch support for the relationship; if not defined, it will be determined based on both peers. | False |
+| [**cardinality**](#cardinality) | Attribute | Defines how many objects are expected on the other side of the relationship. | False |
+| [**description**](#description) | Attribute | Short description of the relationship. | False |
+| [**direction**](#direction) | Attribute | Defines the direction of the relationship. Unidirectional relationships are required when the same model is on both sides. | False |
+| [**identifier**](#identifier) | Attribute | Unique identifier of the relationship within a model; identifiers must match to traverse a relationship in both directions. | False |
+| [**kind**](#kind) | Attribute | Defines the type of the relationship. | True |
+| [**label**](#label) | Attribute | Human friendly representation of the name. Will be autogenerated if not provided | False |
+| [**name**](#name) | Attribute | Relationship name, must be unique within a model and must be all lowercase. | True |
+| [**optional**](#optional) | Attribute | Indicate if this relationship is mandatory or optional. | False |
+| [**order_weight**](#order_weight) | Attribute | Number used to order the relationship in the frontend (table and view). | False |
+| [**peer**](#peer) | Attribute | Type (kind) of objects supported on the other end of the relationship. | True |
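+
+## Example
+
+A minimal sketch of a relationship of kind `Attribute`, based on the `Car` and `Person` models used in the schema documentation; the `TestPerson` peer kind assumes both models live in a `Test` namespace:
+
+```yaml
+nodes:
+  - name: Car
+    namespace: Test
+    relationships:
+      - name: owner
+        peer: TestPerson
+        kind: Attribute
+        cardinality: one
+        optional: False
+```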
+
+## Reference Guide
+### branch
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | branch |
+| **Kind** | `Text` |
+| **Description** | Type of branch support for the relationship; if not defined, it will be determined based on both peers. |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | |
+| **Accepted Values** | `aware` `agnostic` `local` |
+
+### cardinality
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | cardinality |
+| **Kind** | `Text` |
+| **Description** | Defines how many objects are expected on the other side of the relationship. |
+| **Optional** | True |
+| **Default Value** | many |
+| **Constraints** | |
+| **Accepted Values** | `one` `many` |
+
+### description
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | description |
+| **Kind** | `Text` |
+| **Description** | Short description of the relationship. |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | Length: min -, max 128 |
+
+
+### direction
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | direction |
+| **Kind** | `Text` |
+| **Description** | Defines the direction of the relationship. Unidirectional relationships are required when the same model is on both sides. |
+| **Optional** | True |
+| **Default Value** | bidirectional |
+| **Constraints** | |
+| **Accepted Values** | `bidirectional` `outbound` `inbound` |
+
+### identifier
+
+| Key | Value | { class="compact" }
+| ---- | --------------- |
+| **Name** | identifier |
+| **Kind** | `Text` |
+| **Description** | Unique identifier of the relationship within a model; identifiers must match to traverse a relationship in both directions. |
+| **Optional** | True |
+| **Default Value** | |
+| **Constraints** | Regex: `^[a-z0-9\_]+$`
Length: min -, max 128 | + + +### inherited + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | inherited | +| **Kind** | `Boolean` | +| **Description** | Internal value to indicate if the relationship was inherited from a Generic node. | +| **Optional** | True | +| **Default Value** | False | +| **Constraints** | | + + +### kind + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | kind | +| **Kind** | `Text` | +| **Description** | Defines the type of the relationship. | +| **Optional** | False | +| **Default Value** | Generic | +| **Constraints** | | +| **Accepted Values** | `Generic` `Attribute` `Component` `Parent` `Group` | + +### label + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | label | +| **Kind** | `Text` | +| **Description** | Human friendly representation of the name. Will be autogenerated if not provided | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | Length: min -, max 32 | + + +### name + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | name | +| **Kind** | `Text` | +| **Description** | Relationship name, must be unique within a model and must be all lowercase. | +| **Optional** | False | +| **Default Value** | | +| **Constraints** | Regex: `^[a-z0-9\_]+$`
Length: min 3, max 32 | + + +### optional + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | optional | +| **Kind** | `Boolean` | +| **Description** | Indicate if this relationship is mandatory or optional. | +| **Optional** | True | +| **Default Value** | False | +| **Constraints** | | + + +### order_weight + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | order_weight | +| **Kind** | `Number` | +| **Description** | Number used to order the relationship in the frontend (table and view). | +| **Optional** | True | +| **Default Value** | | +| **Constraints** | | + + +### peer + +| Key | Value | { class="compact" } +| ---- | --------------- | +| **Name** | peer | +| **Kind** | `Text` | +| **Description** | Type (kind) of objects supported on the other end of the relationship. | +| **Optional** | False | +| **Default Value** | | +| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Length: min 3, max 32 | + + + + diff --git a/docs/release-nodes/index.yml b/docs/release-notes/index.yml similarity index 100% rename from docs/release-nodes/index.yml rename to docs/release-notes/index.yml diff --git a/docs/release-nodes/release-0.6.0.md b/docs/release-notes/release-0.6.0.md similarity index 97% rename from docs/release-nodes/release-0.6.0.md rename to docs/release-notes/release-0.6.0.md index f2fa05a1e9..a6310de894 100644 --- a/docs/release-nodes/release-0.6.0.md +++ b/docs/release-notes/release-0.6.0.md @@ -2,7 +2,11 @@ order: 060 --- -## Main Changes +# Release 0.6.0 + +## Main changes + + ### New Logo and refresh of the frontend @@ -32,7 +36,7 @@ Infrahub supports two authentication methods > API token can be generated via the user profile page or via the Graphql interface. | | JWT | TOKEN | -|--------------------|------|-------| +| ------------------ | ---- | ----- | | API / GraphQL | Yes | Yes | | Frontend | Yes | No | | Python SDK | Soon | Yes | diff --git a/docs/release-nodes/release-0.7.0.md b/docs/release-notes/release-0.7.0.md similarity index 95% rename from docs/release-nodes/release-0.7.0.md rename to docs/release-notes/release-0.7.0.md index 2ab1203cd8..faf6d97cb6 100644 --- a/docs/release-nodes/release-0.7.0.md +++ b/docs/release-notes/release-0.7.0.md @@ -1,9 +1,12 @@ --- order: 070 --- -## Main Changes +# Release 0.7.0 -### Proposed Change +## Main changes + + +### Proposed change A Proposed Change provides a single workflow to integrate the changes from a given branch into the main branch. It is the equivalent of a Pull Request or a Merge Request for Infrahub. @@ -11,6 +14,7 @@ It is the equivalent of a Pull Request or a Merge Request for Infrahub. When a user is ready to integrate their change into the main branch, they can create a Proposed Change. The Proposed Change panel groups all information related to the change and it will allow other members of the team to review and comment the changes as needed. Information related to a change: + - Data changes (diff) - Files changes (diff) - Artifacts changes (diff) @@ -24,6 +28,7 @@ Information related to a change: An artifact is the result of a Transformation for a specific context and/or object, it can have different format either in plain text or JSON. An artifact improve the Transformation by providing the following additional features: + - **Caching** : Generated Artifact are stored in the internal object storage. For a resource intensive Transformation, it will significantly reduce the load of the system if an artifact can be serve from the cache instead of regenerating each time. - **Traceability** : Past values of an artifact remains available. In a future release, it will be possible to compare the value of an artifact over time. - **Peer Review** : Artifacts are automatically part of the Proposed Change review process @@ -35,12 +40,12 @@ While the content of an artifact can change, its identifier will remain the same It's now possible to define in the schema how a given model, attribute or relationship should behave regarding the branches. By default, all models defined in the schema will be **branch aware** which means that any changes to an object based on a **branch aware** model will be applied only to the branch and can be integrated into the main branch via a Propose Change. 
-It's now possible to also configure a model, an attribute or a relationship as : +It's now possible to also configure a model, an attribute or a relationship as: + - **branch agnostic**: All changes to an object based on a **branch agnostic** model will automatically be available in all branches. - **branch local**: All changes will stay local to the branch. A model in **branch local** mode will not be affected by the Diff and the Merge. - -### Object Storage +### Object storage A new object store has been introduced to easily store and retrieve files in an object storage. The object storage interface is independent of the branches. @@ -52,27 +57,28 @@ Currently only a local backend is supported but the goal over time is to support The Python SDK now support more granular queries by introducing the support for `include` and `exclude` parameters on all methods to query objects from Infrahub. -### Architecture Change +### Architecture change Several changes to the Architecture have been introduced to prepare the deployment of Infrahub in a production environment: + - The frontend container has been removed and the frontend is now being served from the same endpoint as the backend (http://localhost:8000 by default). - It's now possible to run multiple Git Agents, to increase the number of asynchronous tasks that Infrahub can process at the same time. To support that a new cache container has been introduced. -### Other Changes +## Other changes - Add OpenTelemetry - Add GraphQL Query Analyzer @dgarros (#966) -- Replace graphql playground with Graphiql @morganpartee (#1024) +- Replace GraphQL playground with Graphiql @morganpartee (#1024) - Add Links in the footer - Convert all UUID to Temporal UUID @dgarros (#936) -## Migration Guide +## Migration guide ### Rebuild the demo environment It's mandatory to completely rebuild your demo environment with the following commands. -``` +```sh invoke demo.destroy demo.build demo.init demo.start invoke demo.load-infra-schema invoke demo.load-infra-data @@ -113,18 +119,18 @@ it's recommend to pull the latest changes into your fork. ### 🐛 Bug Fixes -- Fix tooltips display + delay @pa-lem (#1029) +- Fix tool-tips display + delay @pa-lem (#1029) - Remove disclosure component and use custom one @pa-lem (#1023) - Diff responsive UI @pa-lem (#1011) - Fix approve mutation + add merge button @pa-lem (#1004) -- Artifacts diff url @pa-lem (#1005) +- Artifacts diff URL @pa-lem (#1005) - Abort merge if the operation wasn't successful @ogenstad (#980) - Validate values during creation of attributes @ogenstad (#942) ### 🧰 Maintenance - Modify Data Integrity check to report progress @ogenstad (#1041) -- Change rpc callback function to be async @ogenstad (#1016) +- Change RPC callback function to be async @ogenstad (#1016) - Restrict available namespaces for user schemas @ogenstad (#995) - Exclude Checks and Validators from the menu and rename Blacklist to Excludelist @dgarros (#984) - Remove test\_client argument and functionality from SDK @ogenstad (#986) diff --git a/docs/retype.yml b/docs/retype.yml index ba3246c2c1..ad8dec56bf 100644 --- a/docs/retype.yml +++ b/docs/retype.yml @@ -1,7 +1,7 @@ --- input: . 
output: .retype -url: # Add your website address here +url: docs.infrahub.app branding: title: Infrahub label: Docs diff --git a/docs/schema/attribute.j2 b/docs/schema/attribute.j2 deleted file mode 100644 index 2980d6575f..0000000000 --- a/docs/schema/attribute.j2 +++ /dev/null @@ -1,37 +0,0 @@ ---- -label: Attribute -layout: default -order: 800 ---- -{% macro attribute_constraints(attr) -%} -{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex or attr.min_length or attr.max_length %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.min_length | default("-")}}{% endif %} -{%- endmacro %} -# Attribute - -In a schema file, an attribute can be defined inside a `node` or inside a `node extension`. - -Below is the list of all available options to define an Attribute in the schema -{% for attr in schema.nodes[1].attributes -%} -## {{ attr.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ attr.name }} | -| **Kind** | `{{ attr.kind }}` | -| **Description** | {{ attr.description }} | -| **Constraints** | {{attribute_constraints(attr)}} | -{% if attr.enum -%} -| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | -{%- endif %} - -{% endfor %} -{% for rel in schema.nodes[1].relationships -%} -## {{ rel.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ rel.name }} | -| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | -| **Description** | {{ rel.description }} | - -{% endfor %} diff --git a/docs/schema/attribute.md b/docs/schema/attribute.md deleted file mode 100644 index 8fbef3f2ba..0000000000 --- a/docs/schema/attribute.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -label: Attribute -layout: default -order: 800 ---- - -# Attribute - -In a schema file, an attribute can be defined inside a `node` or inside a `node extension`. - -Below is the list of all available options to define an Attribute in the schema -## name - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | name | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min 3, max 3 | - - -## namespace - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | namespace | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Length: min 3, max 3 | - - -## kind - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | kind | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min 3, max 3 | -| **Accepted Values** | `ID` `Text` `TextArea` `DateTime` `Email` `Password` `HashedPassword` `URL` `File` `MacAddress` `Color` `Number` `Bandwidth` `IPHost` `IPNetwork` `Checkbox` `List` `JSON` `Any` `String` `Integer` `Boolean` | - -## enum - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | enum | -| **Kind** | `List` | -| **Description** | | -| **Constraints** | | - - -## regex - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | regex | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | - - -## max_length - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | max_length | -| **Kind** | `Number` | -| **Description** | | -| **Constraints** | | - - -## min_length - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | min_length | -| **Kind** | `Number` | -| **Description** | | -| **Constraints** | | - - -## label - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | label | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## description - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | description | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## unique - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | unique | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - -## optional - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | optional | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - -## branch - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | branch | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `aware` `agnostic` `local` | - -## order_weight - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | order_weight | -| **Kind** | `Number` | -| **Description** | | -| **Constraints** | | - - -## default_value - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | default_value | -| **Kind** | `Any` | -| **Description** | | -| **Constraints** | | - - -## inherited - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | inherited | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - - -## node - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | node | -| **Kind** | `Object` | -| **Description** | | - diff --git a/docs/schema/core-schema.j2 b/docs/schema/core-schema.j2 deleted file mode 100644 index 7280eea201..0000000000 --- a/docs/schema/core-schema.j2 +++ /dev/null @@ -1,29 +0,0 @@ -{%- macro attribute_constraints(attr) -%} -{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex or attr.min_length or attr.max_length %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.min_length | default("-")}}{% endif %} -{%- endmacro -%} -# Schema - -## Node, Attributes and Relationships - - -| Name | Description | Kind | Constraints | -| -- | -- | -- | -- | -- | -{% for attr in schema.nodes[0].attributes -%} -| {{ attr.name }} | {{ attr.description }} | {{ attr.kind }} | {{attribute_constraints(attr)}} | -{% endfor %} - - -### Attribute Kind - -| -| - - - -### Relationship Kind - - - - -## Generics & Groups - diff --git a/docs/schema/generic.j2 b/docs/schema/generic.j2 deleted file mode 100644 index e9cb7bea58..0000000000 --- a/docs/schema/generic.j2 +++ /dev/null @@ -1,36 +0,0 @@ ---- -label: Generic -layout: default -order: 600 ---- -{% macro attribute_constraints(attr) -%} -{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex or attr.min_length or attr.max_length %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.min_length | default("-")}}{% endif %} -{%- endmacro %} -# Generic - -Below is the list of all available options to define a Generic in the schema - -{% for attr in schema.nodes[2].attributes -%} -## {{ attr.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ attr.name }} | -| **Kind** | `{{ attr.kind }}` | -| **Description** | {{ attr.description }} | -| **Constraints** | {{attribute_constraints(attr)}} | -{% if attr.enum -%} -| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | -{%- endif %} - -{% endfor %} -{% for rel in schema.nodes[2].relationships -%} -## {{ rel.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ rel.name }} | -| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | -| **Description** | {{ rel.description }} | - -{% endfor %} \ No newline at end of file diff --git a/docs/schema/generic.md b/docs/schema/generic.md deleted file mode 100644 index 28da08a5c7..0000000000 --- a/docs/schema/generic.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -label: Generic -layout: default -order: 600 ---- - -# Generic - -Below is the list of all available options to define a Generic in the schema - -## name - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | name | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min 3, max 3 | - - -## peer - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | peer | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Length: min 3, max 3 | - - -## kind - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | kind | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `Generic` `Attribute` `Component` `Parent` `Group` | - -## label - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | label | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## description - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | description | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## identifier - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | identifier | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## cardinality - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | cardinality | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `one` `many` | - -## order_weight - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | order_weight | -| **Kind** | `Number` | -| **Description** | | -| **Constraints** | | - - -## optional - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | optional | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - -## branch - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | branch | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `aware` `agnostic` `local` | - -## inherited - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | inherited | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - - -## node - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | node | -| **Kind** | `Object` | -| **Description** | | - diff --git a/docs/schema/node-extension.md b/docs/schema/node-extension.md deleted file mode 100644 index 4f341969e7..0000000000 --- a/docs/schema/node-extension.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -label: Node Extension -layout: default -order: 850 ---- - -# Node Extension - -Below is the list of all available options to define a node extension in the schema - -## kind - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | kind | -| **Kind** | `Text` | -| **Description** | Node kind, must be unique and must be in CamelCase | -| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Lenght: min 3, max 3 | - -## attributes - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | attributes | -| **Kind** | `List` | -| **Description** | | - -## relationships - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | relationships | -| **Kind** | `List` | -| **Description** | | - diff --git a/docs/schema/node.j2 b/docs/schema/node.j2 deleted file mode 100644 index c910fed0b9..0000000000 --- a/docs/schema/node.j2 +++ /dev/null @@ -1,35 +0,0 @@ ---- -label: Node -layout: default -order: 900 ---- -{% macro attribute_constraints(attr) -%} -{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex or attr.min_length or attr.max_length %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.min_length | default("-")}}{% endif %} -{%- endmacro %} -# Node - -Below is the list of all available options to define a node in the schema -{% for attr in schema.nodes[0].attributes -%} -## {{ attr.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ attr.name }} | -| **Kind** | `{{ attr.kind }}` | -| **Description** | {{ attr.description }} | -| **Constraints** | {{attribute_constraints(attr)}} | -{% if attr.enum -%} -| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | -{%- endif %} - -{% endfor %} -{% for rel in schema.nodes[0].relationships -%} -## {{ rel.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ rel.name }} | -| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | -| **Description** | {{ rel.description }} | - -{% endfor %} diff --git a/docs/schema/node.md b/docs/schema/node.md deleted file mode 100644 index db38005159..0000000000 --- a/docs/schema/node.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -label: Node -layout: default -order: 900 ---- - -# Node - -Below is the list of all available options to define a node in the schema -## name - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | name | -| **Kind** | `Text` | -| **Description** | Node name, must be unique and must be all lowercase. | -| **Constraints** |
Length: min 2, max 2 | - - -## namespace - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | namespace | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Length: min 3, max 3 | - - -## label - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | label | -| **Kind** | `Text` | -| **Description** | Human friendly representation of the name/kind | -| **Constraints** |
Length: min -, max - | - - -## description - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | description | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## branch - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | branch | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `aware` `agnostic` `local` | - -## default_filter - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | default_filter | -| **Kind** | `Text` | -| **Description** | Default filter used to search for a node in addition to its ID. | -| **Constraints** | | - - -## display_labels - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | display_labels | -| **Kind** | `List` | -| **Description** | List of attributes to use to generate the display label | -| **Constraints** | | - - -## order_by - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | order_by | -| **Kind** | `List` | -| **Description** | List of attributes to use to order the results by default | -| **Constraints** | | - - -## inherit_from - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | inherit_from | -| **Kind** | `List` | -| **Description** | List of Generic Kind that this node is inheriting from | -| **Constraints** | | - - -## groups - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | groups | -| **Kind** | `List` | -| **Description** | List of Group that this node is part of | -| **Constraints** | | - - - -## attributes - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | attributes | -| **Kind** | `List` | -| **Description** | | - -## relationships - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | relationships | -| **Kind** | `List` | -| **Description** | | - diff --git a/docs/schema/relationship.j2 b/docs/schema/relationship.j2 deleted file mode 100644 index 6287ce74f9..0000000000 --- a/docs/schema/relationship.j2 +++ /dev/null @@ -1,38 +0,0 @@ ---- -label: Relationship -layout: default -order: 700 ---- -{% macro attribute_constraints(attr) -%} -{% if attr.regex %} Regex: `{{attr.regex}}`{% endif %}{% if attr.regex or attr.min_length or attr.max_length %}
{% endif %}{% if attr.min_length or attr.max_length %} Length: min {{attr.min_length | default("-")}}, max {{attr.min_length | default("-")}}{% endif %} -{%- endmacro %} -# Relationship - -In a schema file, a relationship can be defined inside a `node` or inside a `node extension`. - -Below is the list of all available options to define a Relationship in the schema - -{% for attr in schema.nodes[2].attributes -%} -## {{ attr.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ attr.name }} | -| **Kind** | `{{ attr.kind }}` | -| **Description** | {{ attr.description }} | -| **Constraints** | {{attribute_constraints(attr)}} | -{% if attr.enum -%} -| **Accepted Values** | {% for value in attr.enum %}`{{ value }}` {% endfor %} | -{%- endif %} - -{% endfor %} -{% for rel in schema.nodes[2].relationships -%} -## {{ rel.name }} - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | {{ rel.name }} | -| **Kind** | {% if rel.cardinality == "one" %}`Object`{%else%}`List`{%endif%} | -| **Description** | {{ rel.description }} | - -{% endfor %} \ No newline at end of file diff --git a/docs/schema/relationship.md b/docs/schema/relationship.md deleted file mode 100644 index f99b2d4da9..0000000000 --- a/docs/schema/relationship.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -label: Relationship -layout: default -order: 700 ---- - -# Relationship - -In a schema file, a relationship can be defined inside a `node` or inside a `node extension`. - -Below is the list of all available options to define a Relationship in the schema - -## name - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | name | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min 3, max 3 | - - -## peer - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | peer | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | Regex: `^[A-Z][a-zA-Z0-9]+$`
Length: min 3, max 3 | - - -## kind - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | kind | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `Generic` `Attribute` `Component` `Parent` `Group` | - -## label - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | label | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## description - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | description | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## identifier - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | identifier | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** |
Length: min -, max - | - - -## cardinality - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | cardinality | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `one` `many` | - -## order_weight - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | order_weight | -| **Kind** | `Number` | -| **Description** | | -| **Constraints** | | - - -## optional - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | optional | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - -## branch - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | branch | -| **Kind** | `Text` | -| **Description** | | -| **Constraints** | | -| **Accepted Values** | `aware` `agnostic` `local` | - -## inherited - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | inherited | -| **Kind** | `Boolean` | -| **Description** | | -| **Constraints** | | - - - -## node - -| -- | -- | { class="compact" } -| ---- | --------------- | -| **Name** | node | -| **Kind** | `Object` | -| **Description** | | - diff --git a/docs/topics/architecture.md b/docs/topics/architecture.md new file mode 100644 index 0000000000..65063b0416 --- /dev/null +++ b/docs/topics/architecture.md @@ -0,0 +1,65 @@ +--- +label: Architecture +layout: default +--- +# Architecture diagram + +![](../media/high_level_architecture.excalidraw.svg) + +## Infrahub components + +### API server + +Language: Python + +The API server delivers the REST API and the GraphQL endpoints. +Internally, the API server is built with FastAPI as the web framework and Graphene to generate the GraphQL endpoints. + +!!! + +Multiple instances of the API server can run at the same time to process more requests. + +!!! + +### Git agent + +Language: Python + +The Git agent is responsible for managing all the content related to the Git repositories. It organizes the file system to quickly access any relevant commit. The Git agent periodically pulls updates from the Git server and listens to the RPC channel on the event bus for tasks to execute. + +Some of the tasks that can be executed on the Git agent include: + +- Rendering a Jinja template. +- Rendering a transform function. +- Executing a check. +- All Git operations (pull/merge/diff). + +!!! + +Multiple instances of the Git agent can run at the same time to process more requests. + +!!! + +### Frontend + +Language: React + +## External systems + +### Graph database + +The graph database is based on Bolt and Cypher. Currently, we have validated both Neo4j 5.x and Memgraph as possible options. +Neo4j is a production-grade, battle-tested graph database that is used in thousands of deployments around the world. +Memgraph is a lightweight, very fast, in-memory database that works great for testing and demos. + +### Message bus + +The message bus is based on RabbitMQ. It supports both a fanout channel to distribute messages to all members at the same time and an RPC framework to distribute work synchronously. + +### Cache + +The cache is based on Redis. It's mainly used as a central point to support the distributed lock system between all the different components of the system. + +### Git server (GitHub/GitLab) + +Any Git server can be used; the most popular are GitHub, GitLab, and Bitbucket. 

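To make the cache's role concrete, the snippet below sketches the kind of Redis-based distributed lock the components above could use to coordinate work. It is a minimal illustration only: the `redis` Python client, the `lock:` key prefix, and the timeout are assumptions, not Infrahub's actual implementation.

```python
import uuid

import redis

client = redis.Redis(host="cache", port=6379)


def acquire_lock(name: str, timeout: int = 30) -> str | None:
    """Try to acquire a named lock shared by every API server and Git agent."""
    token = str(uuid.uuid4())
    # SET with nx=True only succeeds if the key does not exist yet;
    # ex=timeout makes the lock expire so a crashed holder cannot block forever.
    if client.set(f"lock:{name}", token, nx=True, ex=timeout):
        return token
    return None


def release_lock(name: str, token: str) -> None:
    """Release the lock only if this process still holds it."""
    # A production implementation would make this check-and-delete atomic,
    # typically with a small Lua script executed server-side.
    if client.get(f"lock:{name}") == token.encode():
        client.delete(f"lock:{name}")
```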
diff --git a/docs/topics/artifact.md b/docs/topics/artifact.md new file mode 100644 index 0000000000..a4b539c572 --- /dev/null +++ b/docs/topics/artifact.md @@ -0,0 +1,81 @@ +--- +label: Artifact +layout: default +--- + +# Artifact + +An artifact is the result of a [Transformation](./transformation.md) for a specific context and/or object. It can be either plain text or JSON. + +!!!success Examples + +- For a network device, you can use an artifact to track the configuration generated from a Jinja template (RFile). +- For a security device, an artifact can be the list of rules, in the JSON format of your choice, generated by a Python transformation. +- An artifact can also represent the configuration of a DNS server or the configuration of a specific Virtual IP on a load balancer. + +!!! + +While it's always possible to generate [Transformations](./transformation.md) on demand via the API, having an artifact provides some additional benefits: + +- **Caching**: Generated artifacts are stored in the internal [object storage](./object-storage.md). For resource-intensive transformations, it will significantly reduce the load on the system if an artifact can be served from the cache instead of being regenerated each time. +- **Traceability**: Past values of an artifact remain available. In a future release, it will be possible to compare the value of an artifact over time. +- **Peer Review**: Artifacts are automatically part of the [Proposed Change](./proposed-change.md) review process. + +While the content of an artifact can change, its identifier will remain the same over time. + +## High level design + +Artifacts are defined by grouping a [transformation](./transformation.md) with a group of targets in an *Artifact Definition*. + +An **artifact definition** centralizes all the information required to generate an artifact: + +- Group of targets +- Transformation +- Format of the output +- Information to extract from each target that must be passed to the transformation. + +![](../media/artifact.excalidraw.svg) + +## Creating an artifact definition + +Artifact definitions can be created via the frontend, via GraphQL, or via a Git repository. + +For Infrahub to automatically import an artifact definition from a repository, it must be declared in the `.infrahub.yml` file at the root of the repository under the key `artifact_definitions`. + +```yaml +--- +artifact_definitions: + - name: "" + transformation: "" +``` + +You can access an artifact via the frontend or GraphQL, but you shouldn't manually create them. Infrahub should generate and manage all artifacts. + +## Examples + +### Startup configuration for edge devices + +The project [`infrahub-demo-edge`](https://github.com/opsmill/infrahub-demo-edge) includes most elements required to generate the startup configuration of all edge devices. + +In the `.infrahub.yml` the artifact definition is configured as follows: + +```yaml +artifact_definitions: + - name: "Startup Config for Edge devices" + artifact_name: "startup-config" + parameters: + device: "name__value" + content_type: "text/plain" + targets: "edge_router" + transformation: "device_startup" +``` + +- `transformation: "device_startup"` references the RFile transformation `device_startup`, defined in the same repository. +- The GraphQLQuery `device_startup_info` is indirectly connected to the artifact definition via the transformation. +- `targets: "edge_router"` references a group of edge routers named `edge_router`. It must already exist in Infrahub. 

+- `parameters` defines the information that must be extracted from each member of the group and passed to the transformation. Here, the transformation `device_startup` must have a parameter `device` (coming from the GraphQL Query) to render the configuration properly. The value of `device` for each member of the group will be derived from `name__value`, the value of its `name` attribute. diff --git a/docs/topics/auth.md b/docs/topics/auth.md new file mode 100644 index 0000000000..d35feac5a0 --- /dev/null +++ b/docs/topics/auth.md @@ -0,0 +1,39 @@ +--- +label: User management and authentication +layout: default +--- + +# User management and authentication + +Infrahub now supports standard user management and authentication systems. + +A user account can have 3 levels of permissions: + +- `admin` +- `read-write` +- `read-only` + +By default, Infrahub will allow anonymous access in read-only mode. It's possible to disable this via the configuration `main.allow_anonymous_access` or via the environment variable `INFRAHUB_ALLOW_ANONYMOUS_ACCESS`. + +## Authentication mechanisms + +Infrahub supports two authentication methods: + +- JWT token: Short-lived tokens generated on demand from the API. +- API Token: Long-lived tokens generated ahead of time. + +> API tokens can be generated via the user profile page or via the GraphQL interface. + +| | JWT | TOKEN | +| ------------------ | ---- | ----- | +| API / GraphQL | Yes | Yes | +| Frontend | Yes | No | +| Python SDK | Soon | Yes | +| infrahubctl | Soon | Yes | +| GraphQL Playground | No | Yes | + +!!! + +While using the API, the authentication token must be provided in a header named `X-INFRAHUB-KEY`. + +!!! diff --git a/docs/knowledge-base/graphql.md b/docs/topics/graphql.md similarity index 67% rename from docs/knowledge-base/graphql.md rename to docs/topics/graphql.md index 53a0bdf771..3b68da1013 100644 --- a/docs/knowledge-base/graphql.md +++ b/docs/topics/graphql.md @@ -1,26 +1,26 @@ --- -label: GraphQL Query +label: GraphQL queries layout: default -order: 800 --- # GraphQL -The GraphQL interface is the main interface to interact with Infrahub, the GraphQL Schema is automatically generated based on the core models and the user-defined schema models. +The GraphQL interface is the main interface to interact with Infrahub. The GraphQL schema is automatically generated based on the core models and the user-defined schema models. -The endpoint to interact with the main branch is accessible at `https:///graphql`. -To interact with a branch the url must include the name of the branch. `https:///graphql/` +The endpoint to interact with the main branch is accessible at `https:///graphql`. +To interact with a branch the URL must include the name of the branch, such as `https:///graphql/`. -## Query & Mutations +## Query & mutations -For each model in the schema, a GraphQL Query and 3 Mutations will be generated based on the namespace and the name of the model. +For each model in the schema, a GraphQL query and 3 mutations will be generated based on the namespace and the name of the model. 

+ +For example, for the model `CoreRepository` the following query and mutations have been generated: - `Query` : **CoreRepository** - `Mutation` : **CoreRepositoryCreate** - `Mutation` : **CoreRepositoryUpdate** - `Mutation` : **CoreRepositoryDelete** -### Query Format +### Query format The top level query for each model will always return a list of objects and the query will have the following format `CoreRepository` > `edges` > `node` > `display_label` @@ -43,22 +43,24 @@ query { All list of objects will be nested under `edges` & `node` to make it possible to control the pagination and access the attribute `count`. !!! -##### `ID` and `display_label` +#### `ID` and `display_label` + For all nodes, the attribute `id` and `display_label` are automatically available. The value used to generate the `display_label` can be defined for each model in the schema. If no value has been provided a generic display label with the kind and the ID of the Node will be generated. At the object level, there are mainly 3 types of resources that can be accessed, each with a different format: + - `Attribute` - `Relationship` of `Cardinality One` - `Relationship` of `Cardinality Many` #### Attribute -Each Attribute is its own object in GraphQL to expose the value and all the metadata. +Each attribute is its own object in GraphQL to expose the value and all the metadata. -In the query below, to access the attribute **name** of the object the query must be `CoreRepository` > `edges` > `node` > `name` > `value`. -At the same level all the metadata of the attribute are also available example : `is_protected`, `is_visible`, `source` & `owner` +In the query below, to access the attribute **name** of the object the query must be `CoreRepository` > `edges` > `node` > `name` > `value`. +At the same level all the metadata of the attribute are also available, for example: `is_protected`, `is_visible`, `source` & `owner`. -```graphql #6-14 Example query to access the value and the properties of the Attribute 'name' +```graphql #6-14 Example query to access the value and the properties of the attribute 'name' query { CoreRepository { count @@ -83,7 +83,7 @@ query { #### Relationship of `Cardinality One` -A Relationship to another model with a cardinality of `One` will be represented with a `NestedEdged` object composed of a `node` and a `properties` objects. The `node` gives access to the remote `node` (the peer of the relationship) while `properties` gives access to the properties of the relationship itself. +A relationship to another model with a cardinality of `One` will be represented with a `NestedEdged` object composed of a `node` and a `properties` object. The `node` gives access to the remote `node` (the peer of the relationship) while `properties` gives access to the properties of the relationship itself. ```graphql #6-19 Example query to access the peer and the properties of the relationship 'account', with a cardinality of one. query { @@ -111,7 +113,7 @@ query { #### Relationship of `Cardinality Many` -A Relationship with a cardinality of `Many` will be represented with a `NestedPaginated` object composed. It was the same format as the top level `PaginatedObject` with `count` and `edges` but the child element will expose both `node` and `properties`. The `node` gives access to the remote `node` (the peer of the relationship) while `properties` gives access to the properties of the relationship itself. 

+A relationship with a cardinality of `Many` will be represented with a `NestedPaginated` object. It has the same format as the top level `PaginatedObject`, with `count` and `edges`, but the child element will expose both `node` and `properties`. The `node` gives access to the remote `node` (the peer of the relationship) while `properties` gives access to the properties of the relationship itself. ```graphql #6-20 Example query to access the relationship 'tags', with a cardinality of Many. query { @@ -140,27 +142,29 @@ query { } ``` -### Mutations Format +### Mutations format + +The format of the mutation to `Create` and `Update` an object has some similarities with the query format. The format will be slightly different for: -The format of the Mutation to Create & Update an object have some similarities with the Query format and similartly, the format will be slightly different for : - An `Attribute` -- A Relationship of `Cardinality One` -- A Relationship of `Cardinality Many` +- A relationship of `Cardinality One` +- A relationship of `Cardinality Many` + +#### Create and update -#### Create & Update +To `Create` or `Update` an object, the mutations will have the following properties: -To `Create` or `Update` an object, the mutations will have the following properties -- The Input for the mutation must be provided inside `data` +- The input for the mutation must be provided inside `data`. - All mutations will return `ok` and `object` to access some information after the mutation has been executed. -- For `Update`, it is mandatory to provide an `id` +- For `Update`, it is mandatory to provide an `id`. ```graphql mutation { CoreRepositoryCreate( data: { name: { value: "myrepop" }, # Attribute - location: { value: "myrepop" }, # Attribute - account: { id: "myaccount" }, # Relationship One + location: { value: "myrepop" }, # Attribute + account: { id: "myaccount" }, # Relationship One tags: [ { id: "my_id" } ]} # Relationship Many ) { ok @@ -171,39 +175,37 @@ mutation { } ``` -## Branch Management +## Branch management -In addition to the Query and the Mutations automatically generated based on the schema, there are some Query and Mutations to interact with the Branches. +In addition to the queries and the mutations automatically generated based on the schema, there are some queries and mutations to interact with the branches. - **Query**: `Branch`, Query a list of all branches - **Mutation**: `BranchCreate`, Create a new branch -- **Mutation**: `BranchUpdate`, Update the descrition of a branch -- **Mutation**: `BranchDelete`, Delete an existing Branch -- **Mutation**: `BranchRebase`, Rebase an existing Branch with the main Branch -- **Mutation**: `BranchMerge`, Merge a Branch into main +- **Mutation**: `BranchUpdate`, Update the description of a branch +- **Mutation**: `BranchDelete`, Delete an existing branch +- **Mutation**: `BranchRebase`, Rebase an existing branch with the main branch +- **Mutation**: `BranchMerge`, Merge a branch into main - **Mutation**: `BranchValidate`, Validate if a branch has some conflicts - ## GraphQLQuery -The GraphQLQuery Model has been designed to store a GraphQL Query in order to simplify its execution and to associate it with other internal objects like `Transformation`. +The `GraphQLQuery` model has been designed to store a GraphQL query in order to simplify its execution and to associate it with other internal objects like `Transformation`. -A GraphQLQuery object can be created directly from the API or it can be imported from a Git Repository. 

+A `GraphQLQuery` object can be created directly from the API or it can be imported from a Git repository. -Every time a GraphQLQuery is being Created or Updated, the content of the query will be analized to -- Ensure the query is valid and is compatible with the schema -- Extract some information about the query itself (see below) +Every time a `GraphQLQuery` is created or updated, the content of the query will be analyzed to: + +- Ensure the query is valid and compatible with the schema. +- Extract some information about the query itself (see below). ### Information extracted from the query + -- Type of Operations present in the Query [Query, Mutation, Subscription] +- Type of operations present in the query [Query, Mutation, Subscription] - Variables accepted by the query - Depth, number of nested levels in the query - Height, total number of fields requested in the query - List of Infrahub models referenced in the query -### Import from a Git Repository - -The Git Agent will automatically try to import all files with the extension `.gql` into a GraphQLQuery with the name of the file as the name of the query. - - +### Import from a Git repository + +The Git agent will automatically try to import all files with the extension `.gql` into a `GraphQLQuery` with the name of the file as the name of the query. diff --git a/docs/topics/index.yml b/docs/topics/index.yml new file mode 100644 index 0000000000..37b6379ae6 --- /dev/null +++ b/docs/topics/index.yml @@ -0,0 +1,4 @@ +--- +label: Topics +icon: "book" +order: 800 diff --git a/docs/topics/local-demo-environment.md b/docs/topics/local-demo-environment.md new file mode 100644 index 0000000000..55d64902c2 --- /dev/null +++ b/docs/topics/local-demo-environment.md @@ -0,0 +1,116 @@ +--- +label: Demo environment +layout: default +--- +# Local demo environment + +A local environment based on Docker Compose is available for demo and testing. +It's designed to be controlled by `invoke` using a list of predefined commands. + +{ class="compact" } +| Command | Description | +| ------------------------ | ------------------------------------------------------------------------- | +| `demo.build` | Build an image with the provided name and Python version. | +| `demo.init` | (deprecated) Initialize Infrahub database before using it the first time. | +| `demo.start` | Start a local instance of Infrahub within docker compose. | +| `demo.stop` | Stop the running instance of Infrahub. | +| `demo.destroy` | Destroy all containers and volumes. | +| `demo.cli-git` | Launch a bash shell inside the running Git agent container. | +| `demo.cli-server` | Launch a bash shell inside the running Infrahub container. | +| `demo.debug` | Start a local instance of Infrahub in debug mode. | +| `demo.status` | Display the status of all containers. | +| `demo.load-infra-schema` | Load the `infrastructure_base` schema into Infrahub. | +| `demo.load-infra-data` | Generate some data representing a small network with 6 devices. | + +## Topology + +{ class="compact" } +| Container Name | Image | Description | +| ------------------- | ------------------------------------------------------ | ------------------------------------------------------ | +| **database** | memgraph/memgraph

or
neo4j:community | Graph Database | +| **message-queue** | rabbitmq:3.12-management | Message bus based on RabbitMQ | +| **cache** | redis:7.2 | Cache based on Redis, mainly used for distributed lock | +| **infrahub-server** | Dockerfile | Instance of the API server, running GraphQL | +| **infrahub-git** | Dockerfile | Instance of the Git agent, managing the Git Repository | +| **frontend** | Dockerfile | Instance of the Frontend | + +[!ref Check the architecture diagram to have more information about each component](./architecture.md) + +## Getting started + +### Prerequisites + +In order to run the demo environment, the following applications must be installed on the system: + +- [pyinvoke](https://www.pyinvoke.org/) +- Docker & Docker Compose + +> On a laptop, both Docker and Docker Compose can be installed via [Docker Desktop](https://www.docker.com/products/docker-desktop/). + +### First utilization + +Before the first utilization you need to build the images for Infrahub with the command: + +```sh +invoke demo.build +``` + +Initialize the database and start the application: + +```sh +invoke demo.start +``` + +### Load some data + +Once you have an environment up and running, you can load your own schema or explore the one provided with the project using the following commands. + +```sh +invoke demo.load-infra-schema +invoke demo.load-infra-data +``` + +### Control the local environment + +- `invoke demo.start` : Start all the containers in detached mode. - `invoke demo.stop` : Stop all the containers. - `invoke demo.destroy` : Destroy all containers and volumes. + +!!! + +`invoke demo.debug` can be used as an alternative to `invoke demo.start`; the main difference is that it will stay *attached* to the containers and all the logs will be displayed in real time in the CLI. + +!!! + +## Advanced settings + +### Support for `sudo` + +On a Linux system, the tooling will try to automatically detect whether `sudo` is required to run the docker command. + +It's possible to control this setting with the environment variable `INVOKE_SUDO`: + +```sh +export INVOKE_SUDO=1  # force sudo +export INVOKE_SUDO=0  # disable it completely +``` + +### Support for `pty` + +On Linux and macOS, all commands will be executed with PTY enabled by default. + +It's possible to control this setting with the environment variable `INVOKE_PTY`: + +```sh +export INVOKE_PTY=1  # force pty +export INVOKE_PTY=0  # disable it completely +``` + +## Troubleshooting + +It's recommended to check if all containers are still running using `invoke demo.status`. All containers should be present and running. + +- If one is not running, you can try to restart it with `invoke demo.start`. +- If the container is still not coming up, you can watch the logs with `docker logs ` (the container name will include the name of the project and a number, for example, `infrahub-dev-infrahub-git-1`). + +If some containers are still not coming up, it's recommended to start from a fresh install with `invoke demo.destroy`. diff --git a/docs/topics/object-storage.md b/docs/topics/object-storage.md new file mode 100644 index 0000000000..bf11384b94 --- /dev/null +++ b/docs/topics/object-storage.md @@ -0,0 +1,17 @@ +--- +label: Object storage +layout: default +--- +# Object storage + +Infrahub provides an interface to store and retrieve files in object storage. The object storage interface is independent of the branches. + +Currently, Infrahub only supports a local backend. 

The goal over time is to support multiple backends, such as AWS S3, to allow users to select where they would like to store their files. + +Currently, the main interface to interact with the object storage is the REST API. Three methods are supported: + +- GET /api/storage/object/{identifier} +- POST /api/storage/upload/content +- POST /api/storage/upload/file + +Please check the API documentation for more details. diff --git a/docs/knowledge-base/proposed-change.md b/docs/topics/proposed-change.md similarity index 73% rename from docs/knowledge-base/proposed-change.md rename to docs/topics/proposed-change.md index ef5907cb17..3345a04a5d 100644 --- a/docs/knowledge-base/proposed-change.md +++ b/docs/topics/proposed-change.md @@ -1,18 +1,27 @@ --- -label: Proposed Change +label: Proposed change layout: default -order: 150 --- +# Proposed change + A proposed change provides a way to review and discuss how two branches differ from each other and to merge a source branch into the target branch. For people with a development background, this will sound very familiar. It’s like a pull or merge request. The proposed change lets you compare two branches, run tests, and finally merge one branch into another. + ## Discussions and issues as part of the review + -A reviewer of the proposed change can open discussions and write comments, request changes. Once you resolve any requested change, the reviewer would approve the proposed change before they merge it. +A reviewer of the proposed change can open discussions, write comments, and request changes. Once you resolve any requested change, the reviewer can approve the proposed change before merging it. + ## An alternative approach to diff + -In a pull request in GitHub, the diff between two branches is a diff seen from a plain text point of view. A proposed change in Infrahub allows you to see changes in data, as well as the type of diff you’d see in Git. By combining the two, someone reviewing a proposed change in Infrahub can view the diff between [artifacts](artifact) on each branch. +In a pull request on GitHub, the diff between two branches is seen from a plain text point of view. A proposed change in Infrahub allows you to see changes in data, as well as the type of diff you’d see in Git. By combining the two, someone reviewing a proposed change in Infrahub can view the diff between [artifacts](artifact) on each branch. + With this feature, you can create a new branch, change a node attribute in the database, and see how your modifications impact the rendered artifacts. It includes a diff view to see exactly how a configuration might change if the proposed change were to be accepted and merged. -## Continuous Integration - CI +## Continuous integration - CI + Just like you’d expect for a GitHub pull request, you can run checks on a proposed change during the review process and before merging. Infrahub will run data integrity checks between the proposed change branches. Besides this, Infrahub reports any merge conflicts for connected Git repositories. + Infrahub handles custom checks with code through Git repositories. These Checks let you verify the integrity of the database using your custom business logic. A check of this type could be anything you can imagine. An example could be to ensure that at least one router on each site is in an operational status as opposed to being in maintenance mode. ## Conflict resolution + -Infrahub will prevent merging a proposed change if there is a data conflict between the branches. 

An example of such a conflict could be if someone were to update the same attribute of an object in both branches. In order to merge a proposed change that has conflicts, they need to be resolved. To resolve conflicts, you need to review data integrity checks and choose which branch to keep in the change checks section. \ No newline at end of file + +Infrahub will prevent merging a proposed change if there is a data conflict between the branches. An example of such a conflict could be if someone were to update the same attribute of an object in both branches. In order to merge a proposed change that has conflicts, the conflicts must first be resolved. To resolve conflicts, you need to review data integrity checks and choose which branch to keep in the change checks section. diff --git a/docs/topics/readme.md b/docs/topics/readme.md new file mode 100644 index 0000000000..b8581d7df9 --- /dev/null +++ b/docs/topics/readme.md @@ -0,0 +1,12 @@ +# Topics + +Topics explain the concepts of Infrahub and how it works. + +- [Architecture](./architecture.md) +- [Artifact](./artifact.md) +- [Demo environment](./local-demo-environment.md) +- [GraphQL queries](./graphql.md) +- [Object storage](./object-storage.md) +- [Proposed change](./proposed-change.md) +- [Transformation](./transformation.md) +- [User management and authentication](./auth.md) diff --git a/docs/topics/repository.md b/docs/topics/repository.md new file mode 100644 index 0000000000..24851fe357 --- /dev/null +++ b/docs/topics/repository.md @@ -0,0 +1,9 @@ +--- +label: Repository +--- +# External Git repository + + +!!!warning +Coming Soon +!!! diff --git a/docs/knowledge-base/transformation.md b/docs/topics/transformation.md similarity index 62% rename from docs/knowledge-base/transformation.md rename to docs/topics/transformation.md index d183e2514b..4cb5eaba6a 100644 --- a/docs/knowledge-base/transformation.md +++ b/docs/topics/transformation.md @@ -1,63 +1,65 @@ --- label: Transformation layout: default -order: 900 --- # Transformation -A `Transformation` is a generic plugin to transform a dataset into a different format to simplify it's ingestion by a third party systems. +A `Transformation` is a generic plugin to transform a dataset into a different format to simplify its ingestion by third-party systems. The output of a transformation can be either in JSON format or in plain text. -*Currently transformation must be written in Python but in the future more languages could be supported.* +>*Currently transformations must be written in Python, but in the future more languages could be supported.* !!!success Examples + - With the `Jinja Plugin` it's possible to generate any configuration files, in plain text format. -- With the `Python Plugin` its's possible to generate the payload expected by CloudFormation to configure a resource in AWS. +- With the `Python Plugin` it's possible to generate the payload expected by CloudFormation to configure a resource in AWS. + !!! ## High level design -A Transformation is composed of 2 main components: -- A **GraphQL Query** that will define what is the input data +A transformation is composed of 2 main components: + +- A **GraphQL query** that will define the input data. - A **Transformation logic** that will process the data and transform it. ![](../media/transformation.excalidraw.svg) - !!! -The Transformation will automatically inherit the parameters (variables) defined by the GraphQL query. 

Depending on how the GraphQL query has been constructed, a transformation can be static or work for multiple objects. +The transformation will automatically inherit the parameters (variables) defined by the GraphQL query. Depending on how the GraphQL query has been constructed, a transformation can be static or work for multiple objects. !!! ==- Common parameters - -| Name | Type | Default | Required | { class="compact" } -| ------------------ | --------- | ----- | --- | -| **name** | `Text` | - | Yes | -| **label** | `Text` | - | No | -| **description** | `Text` | - | No | -| **timeout** | `Number` | 10 | No | -| **rebase** | `Boolean` | False | No | -| **query** | `Relationship`
CoreGraphQLQuery | - | Yes | -| **repository** | `Relationship`
CoreRepository | - | Yes | +{ class="compact" } +| Name | Type | Default | Required | +| --------------- | ----------------------------------- | ------- | -------- | +| **name** | `Text` | - | Yes | +| **label** | `Text` | - | No | +| **description** | `Text` | - | No | +| **timeout** | `Number` | 10 | No | +| **rebase** | `Boolean` | False | No | +| **query** | `Relationship`
CoreGraphQLQuery | - | Yes | +| **repository** | `Relationship`
CoreRepository | - | Yes | ==- -## Available Transformation +## Available transformations +{ class="compact" } | Namespace | Transformation | Description | Language | Output Format | -|-----------|---------------------|----------------------------------------|----------|---------------| +| --------- | ------------------- | -------------------------------------- | -------- | ------------- | | Core | **RFile** | A file rendered from a Jinja2 template | Jinja2 | Plain Text | | Core | **TransformPython** | A transform function written in Python | Python | JSON | +### RFile (Jinja2 plugin) -### RFile (Jinja2 Plugin) - -An RFile is a Transformation plugin for Jinja2, it can generate any file in plain text format and must be composed of 1 main Jinja2 template and 1 GraphQL Query. +An RFile is a transformation plugin for Jinja2; it can generate any file in plain text format and must be composed of one main Jinja2 template and one GraphQL query. #### Create an RFile The recommended way to create an RFile is to import it from a Git Repository. + - The main Jinja2 template can be in any directory in the repository - The GraphQL Query can be imported as well from the Git Repository or can be already existing in the database. @@ -77,30 +79,35 @@ rfiles: #### Render an RFile An RFile can be rendered with 3 different methods: + - On demand via the REST API -- As part of an [Artifact](./artifact.md) +- As part of an [artifact](./artifact.md) - In CLI for development and troubleshooting ##### From the REST API -A RFile can be rendered on demand via the REST API with the endpoint : `https:///api/rfile/` +An RFile can be rendered on demand via the REST API with the endpoint: `https:///api/rfile/` + +This endpoint is branch-aware and it accepts the name of the branch and/or the time as URL parameters. -This endpoint is branch aware and it accept the name of the branch and/or the time in as a URL parameters - `https:///api/rfile/?branch=branch33` - `https:///api/rfile/?branch=branch33&at=